
!1674 code_sync_0520

From: @ding_fei_fei
Reviewed-by: @liucunwei, @lilongfei15
Signed-off-by: @liucunwei
tags/v1.3.0
Committed by mindspore-ci-bot (Gitee), 4 years ago
Commit: 840fa69b81
100 changed files with 3729 additions and 2209 deletions
  1. cmake/external_libs/protoc.cmake (+26, -0)
  2. ge/CMakeLists.txt (+12, -6)
  3. ge/client/ge_api.cc (+69, -1)
  4. ge/common/auth/file_saver.cc (+0, -1)
  5. ge/common/dump/dump_manager.cc (+71, -45)
  6. ge/common/dump/dump_manager.h (+5, -0)
  7. ge/common/dump/dump_properties.cc (+17, -6)
  8. ge/common/dump/dump_properties.h (+13, -2)
  9. ge/common/helper/model_helper.cc (+13, -2)
  10. ge/common/kernel_store.cc (+4, -0)
  11. ge/common/model_parser/model_parser.cc (+15, -4)
  12. ge/common/model_saver.cc (+10, -6)
  13. ge/common/op/ge_op_utils.cc (+20, -7)
  14. ge/common/profiling/ge_profiling.cc (+34, -14)
  15. ge/common/profiling/profiling_manager.cc (+136, -51)
  16. ge/common/profiling/profiling_manager.h (+2, -0)
  17. ge/common/properties_manager.cc (+9, -4)
  18. ge/common/util.cc (+44, -26)
  19. ge/engine_manager/dnnengine_manager.cc (+80, -33)
  20. ge/executor/CMakeLists.txt (+7, -0)
  21. ge/executor/ge_executor.cc (+18, -1)
  22. ge/ge_inference.mk (+0, -3)
  23. ge/ge_local_engine/engine/ge_local_engine.cc (+2, -1)
  24. ge/ge_local_engine/engine/host_cpu_engine.cc (+20, -13)
  25. ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc (+22, -10)
  26. ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc (+2, -1)
  27. ge/ge_local_engine/ops_kernel_store/op/op_factory.cc (+4, -2)
  28. ge/ge_runner.mk (+0, -3)
  29. ge/generator/ge_generator.cc (+89, -46)
  30. ge/generator/generator_api.cc (+22, -15)
  31. ge/graph/build/graph_builder.cc (+60, -56)
  32. ge/graph/build/label_allocator.cc (+6, -6)
  33. ge/graph/build/logical_stream_allocator.cc (+11, -11)
  34. ge/graph/build/memory/block_mem_assigner.cc (+120, -60)
  35. ge/graph/build/memory/block_mem_assigner.h (+23, -13)
  36. ge/graph/build/memory/graph_mem_assigner.cc (+311, -10)
  37. ge/graph/build/memory/graph_mem_assigner.h (+35, -2)
  38. ge/graph/build/memory/hybrid_mem_assigner.cc (+10, -8)
  39. ge/graph/build/memory/hybrid_mem_assigner.h (+2, -4)
  40. ge/graph/build/memory/memory_assigner.cc (+12, -9)
  41. ge/graph/build/memory/var_mem_assign_util.cc (+21, -19)
  42. ge/graph/build/model_builder.cc (+107, -81)
  43. ge/graph/build/model_builder.h (+1, -1)
  44. ge/graph/build/run_context.cc (+22, -18)
  45. ge/graph/build/stream_allocator.cc (+80, -66)
  46. ge/graph/build/stream_graph_optimizer.cc (+5, -7)
  47. ge/graph/build/task_generator.cc (+75, -60)
  48. ge/graph/common/bcast.cc (+2, -2)
  49. ge/graph/common/omg_util.cc (+57, -13)
  50. ge/graph/common/omg_util.h (+15, -0)
  51. ge/graph/execute/graph_execute.cc (+143, -63)
  52. ge/graph/execute/graph_execute.h (+14, -2)
  53. ge/graph/label/case_label_maker.cc (+13, -9)
  54. ge/graph/label/if_label_maker.cc (+10, -10)
  55. ge/graph/label/label_maker.cc (+21, -17)
  56. ge/graph/label/partitioned_call_label_maker.cc (+3, -3)
  57. ge/graph/label/while_label_maker.cc (+10, -10)
  58. ge/graph/load/graph_loader.cc (+10, -0)
  59. ge/graph/load/model_manager/cpu_queue_schedule.cc (+69, -83)
  60. ge/graph/load/model_manager/data_dumper.cc (+77, -68)
  61. ge/graph/load/model_manager/data_inputer.cc (+1, -1)
  62. ge/graph/load/model_manager/davinci_model.cc (+524, -396)
  63. ge/graph/load/model_manager/davinci_model.h (+11, -8)
  64. ge/graph/load/model_manager/model_manager.cc (+115, -35)
  65. ge/graph/load/model_manager/model_manager.h (+15, -1)
  66. ge/graph/load/model_manager/model_utils.cc (+16, -3)
  67. ge/graph/load/model_manager/task_info/end_graph_task_info.cc (+6, -7)
  68. ge/graph/load/model_manager/task_info/event_record_task_info.cc (+4, -5)
  69. ge/graph/load/model_manager/task_info/event_wait_task_info.cc (+6, -8)
  70. ge/graph/load/model_manager/task_info/fusion_start_task_info.cc (+3, -4)
  71. ge/graph/load/model_manager/task_info/fusion_stop_task_info.cc (+2, -2)
  72. ge/graph/load/model_manager/task_info/hccl_task_info.cc (+27, -24)
  73. ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc (+70, -64)
  74. ge/graph/load/model_manager/task_info/kernel_ex_task_info.h (+1, -0)
  75. ge/graph/load/model_manager/task_info/kernel_task_info.cc (+183, -191)
  76. ge/graph/load/model_manager/task_info/kernel_task_info.h (+2, -0)
  77. ge/graph/load/model_manager/task_info/label_goto_ex_task_info.cc (+10, -9)
  78. ge/graph/load/model_manager/task_info/label_set_task_info.cc (+7, -8)
  79. ge/graph/load/model_manager/task_info/label_switch_by_index_task_info.cc (+18, -21)
  80. ge/graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc (+8, -8)
  81. ge/graph/load/model_manager/task_info/memcpy_async_task_info.cc (+6, -8)
  82. ge/graph/load/model_manager/task_info/model_exit_task_info.cc (+4, -5)
  83. ge/graph/load/model_manager/task_info/profiler_trace_task_info.cc (+4, -4)
  84. ge/graph/load/model_manager/task_info/stream_active_task_info.cc (+11, -11)
  85. ge/graph/load/model_manager/task_info/stream_switch_task_info.cc (+24, -21)
  86. ge/graph/load/model_manager/task_info/stream_switchn_task_info.cc (+24, -24)
  87. ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc (+5, -7)
  88. ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc (+9, -11)
  89. ge/graph/load/model_manager/task_info/task_info.cc (+1, -1)
  90. ge/graph/load/model_manager/task_info/task_info.h (+18, -2)
  91. ge/graph/manager/graph_caching_allocator.cc (+3, -3)
  92. ge/graph/manager/graph_caching_allocator.h (+1, -1)
  93. ge/graph/manager/graph_manager.cc (+184, -70)
  94. ge/graph/manager/graph_manager.h (+25, -9)
  95. ge/graph/manager/graph_manager_utils.cc (+3, -2)
  96. ge/graph/manager/graph_manager_utils.h (+5, -2)
  97. ge/graph/manager/graph_mem_allocator.cc (+3, -113)
  98. ge/graph/manager/graph_mem_allocator.h (+2, -106)
  99. ge/graph/manager/graph_mem_manager.cc (+116, -0)
  100. ge/graph/manager/graph_mem_manager.h (+141, -0)

cmake/external_libs/protoc.cmake (+26, -0)

@@ -48,8 +48,14 @@ function(protobuf_generate comp c_var h_var)
endif()
set(${c_var})
set(${h_var})
set(_add_target FALSE)

foreach(file ${ARGN})
if("${file}" STREQUAL "TARGET")
set(_add_target TRUE)
continue()
endif()

get_filename_component(abs_file ${file} ABSOLUTE)
get_filename_component(file_name ${file} NAME_WE)
get_filename_component(file_dir ${abs_file} PATH)
@@ -67,11 +73,18 @@ function(protobuf_generate comp c_var h_var)
OUTPUT "${proto_output_path}/${file_name}.pb.cc" "${proto_output_path}/${file_name}.pb.h"
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMAND ${CMAKE_COMMAND} -E make_directory "${proto_output_path}"
COMMAND ${CMAKE_COMMAND} -E echo "generate proto cpp_out ${comp} by ${abs_file}"
COMMAND ${protoc_EXECUTABLE} -I${file_dir} --cpp_out=${proto_output_path} ${abs_file}
DEPENDS protoc_build ${abs_file}
COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM )
endforeach()

if(_add_target)
add_custom_target(
${comp} DEPENDS ${${c_var}} ${${h_var}}
)
endif()

set_source_files_properties(${${c_var}} ${${h_var}} PROPERTIES GENERATED TRUE)
set(${c_var} ${${c_var}} PARENT_SCOPE)
set(${h_var} ${${h_var}} PARENT_SCOPE)
@@ -84,8 +97,14 @@ function(protobuf_generate_py comp py_var)
return()
endif()
set(${py_var})
set(_add_target FALSE)

foreach(file ${ARGN})
if("${file}" STREQUAL "TARGET")
set(_add_target TRUE)
continue()
endif()

get_filename_component(abs_file ${file} ABSOLUTE)
get_filename_component(file_name ${file} NAME_WE)
get_filename_component(file_dir ${abs_file} PATH)
@@ -102,11 +121,18 @@ function(protobuf_generate_py comp py_var)
OUTPUT "${proto_output_path}/${file_name}_pb2.py"
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMAND ${CMAKE_COMMAND} -E make_directory "${proto_output_path}"
COMMAND ${CMAKE_COMMAND} -E echo "generate proto cpp_out ${comp} by ${abs_file}"
COMMAND ${protoc_EXECUTABLE} -I${file_dir} --python_out=${proto_output_path} ${abs_file}
DEPENDS protoc_build ${abs_file}
COMMENT "Running PYTHON protocol buffer compiler on ${file}" VERBATIM )
endforeach()

if(_add_target)
add_custom_target(
${comp} DEPENDS ${${py_var}}
)
endif()

set_source_files_properties(${${py_var}} PROPERTIES GENERATED TRUE)
set(${py_var} ${${py_var}} PARENT_SCOPE)



ge/CMakeLists.txt (+12, -6)

@@ -173,10 +173,12 @@ set(TRAIN_SRC_LIST
"graph/manager/graph_manager_utils.cc"
"graph/manager/graph_mem_allocator.cc"
"graph/manager/graph_caching_allocator.cc"
"graph/manager/session_scope_mem_allocator.cc"
"graph/manager/graph_var_manager.cc"
"graph/manager/host_mem_manager.cc"
"graph/manager/rdma_pool_allocator.cc"
"graph/manager/host_mem_allocator.cc"
"graph/manager/graph_mem_manager.cc"
"graph/manager/memory_api.cc"
"graph/manager/model_manager/event_manager.cc"
"graph/manager/trans_var_data_utils.cc"
@@ -270,7 +272,6 @@ set(TRAIN_SRC_LIST
"graph/passes/identity_pass.cc"
"graph/passes/ref_identity_delete_op_pass.cc"
"graph/passes/infershape_pass.cc"
"graph/passes/isolated_op_remove_pass.cc"
"graph/passes/iterator_op_pass.cc"
"graph/passes/link_gen_mask_nodes_pass.cc"
"graph/passes/merge_pass.cc"
@@ -307,6 +308,7 @@ set(TRAIN_SRC_LIST
"graph/passes/merge_to_stream_merge_pass.cc"
"graph/passes/merge_input_memcpy_pass.cc"
"graph/passes/switch_to_stream_switch_pass.cc"
"graph/passes/mark_force_unknown_for_cond_pass.cc"
"graph/passes/attach_stream_label_pass.cc"
"graph/passes/switch_dead_branch_elimination.cc"
"graph/passes/replace_transshape_pass.cc"
@@ -316,13 +318,11 @@ set(TRAIN_SRC_LIST
"graph/passes/transop_without_reshape_fusion_pass.cc"
"graph/passes/transpose_transdata_pass.cc"
"graph/passes/unused_const_pass.cc"
"graph/passes/unused_op_remove_pass.cc"
"graph/passes/var_is_initialized_op_pass.cc"
"graph/passes/parallel_concat_start_op_pass.cc"
"graph/passes/cond_pass.cc"
"graph/passes/cond_remove_pass.cc"
"graph/passes/for_pass.cc"
"graph/passes/variable_format_pass.cc"
"graph/passes/variable_op_pass.cc"
"graph/passes/variable_prepare_op_pass.cc"
"graph/passes/variable_ref_delete_op_pass.cc"
@@ -391,6 +391,8 @@ set(TRAIN_SRC_LIST
"hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc"
"hybrid/node_executor/hccl/hccl_node_executor.cc"
"hybrid/node_executor/rts/rts_node_executor.cc"
"hybrid/node_executor/rts/rts_node_task.cc"
"hybrid/node_executor/rts/rts_task_factory.cc"
"hybrid/node_executor/node_executor.cc"
"hybrid/node_executor/task_context.cc"
"hybrid/hybrid_davinci_model.cc"
@@ -475,6 +477,8 @@ set(INFER_SRC_LIST
"graph/manager/host_mem_allocator.cc"
"graph/manager/graph_mem_allocator.cc"
"graph/manager/graph_caching_allocator.cc"
"graph/manager/session_scope_mem_allocator.cc"
"graph/manager/graph_mem_manager.cc"
"model/ge_model.cc"
"model/ge_root_model.cc"
"graph/common/transop_util.cc"
@@ -519,12 +523,10 @@ set(INFER_SRC_LIST
"graph/passes/dimension_adjust_pass.cc"
"graph/passes/get_original_format_pass.cc"
"graph/passes/shape_operate_op_remove_pass.cc"
"graph/passes/unused_op_remove_pass.cc"
"graph/passes/assert_pass.cc"
"graph/passes/dropout_pass.cc"
"graph/passes/infershape_pass.cc"
"graph/passes/unused_const_pass.cc"
"graph/passes/isolated_op_remove_pass.cc"
"graph/passes/permute_pass.cc"
"graph/passes/ctrl_edge_transfer_pass.cc"
"graph/passes/end_of_sequence_add_control_pass.cc"
@@ -582,6 +584,7 @@ set(INFER_SRC_LIST
"graph/passes/merge_to_stream_merge_pass.cc"
"graph/passes/merge_input_memcpy_pass.cc"
"graph/passes/switch_to_stream_switch_pass.cc"
"graph/passes/mark_force_unknown_for_cond_pass.cc"
"graph/passes/attach_stream_label_pass.cc"
"graph/passes/multi_batch_pass.cc"
"graph/passes/multi_batch_clone_pass.cc"
@@ -606,7 +609,6 @@ set(INFER_SRC_LIST
"graph/passes/switch_logic_remove_pass.cc"
"graph/passes/switch_data_edges_bypass.cc"
"graph/passes/merge_pass.cc"
"graph/passes/variable_format_pass.cc"
"graph/passes/variable_op_pass.cc"
"graph/passes/cast_remove_pass.cc"
"graph/passes/transpose_transdata_pass.cc"
@@ -746,6 +748,7 @@ target_include_directories(ge_runner SYSTEM PRIVATE
${GE_CODE_DIR}/../inc/external
${GE_CODE_DIR}/../inc/cce
${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external
${GE_CODE_DIR}/../abl/adump/external
#### blue zone
${ASCEND_DIR}/driver/include
${ASCEND_DIR}/fwkacllib/include
@@ -822,6 +825,7 @@ target_include_directories(ge_compiler SYSTEM PRIVATE
${GE_CODE_DIR}/../inc/external
${GE_CODE_DIR}/../inc/cce
${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external
${GE_CODE_DIR}/../abl/adump/external
#### blue zone ####
${ASCEND_DIR}/driver/include
${ASCEND_DIR}/fwkacllib/include
@@ -982,6 +986,7 @@ target_include_directories(atc_stub_ge_compiler PRIVATE
#### yellow zone ####
${GE_CODE_DIR}/../inc/cce
${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external
${GE_CODE_DIR}/../abl/adump/external
#### blue zone ####
${ASCEND_DIR}/driver/include
${ASCEND_DIR}/fwkacllib/include
@@ -1022,6 +1027,7 @@ target_include_directories(fwk_stub_ge_runner PRIVATE
#### yellow zone ####
${GE_CODE_DIR}/../inc/cce
${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external
${GE_CODE_DIR}/../abl/adump/external
#### blue zone ####
${ASCEND_DIR}/driver/include
${ASCEND_DIR}/fwkacllib/include


ge/client/ge_api.cc (+69, -1)

@@ -598,6 +598,47 @@ Status Session::RunGraph(uint32_t graph_id, const std::vector<Tensor> &inputs, s
return ret;
}

// Run Graph with stream Asynchronously
Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const std::vector<Tensor> &inputs,
std::vector<Tensor> &outputs) {
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
GELOGT(TRACE_INIT, "Session run graph with stream async start");

ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id);
std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
if (instance_ptr == nullptr) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED,
"[Run][Graph]Run graph with stream asyn failed, the GELib instance is nullptr,"
"session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream);
REPORT_INNER_ERROR("E19999",
"Run graph with stream asyn failed, the GELib instance is nullptr"
"session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream);
return FAILED;
}
if (!instance_ptr->InitFlag()) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED,
"[Run][Graph]Run graph with stream asyn failed, the GELib instance is not init,"
"session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream);
REPORT_INNER_ERROR("E19999",
"Run graph with stream asyn failed, the GELib instance is not init,"
"session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream);
return FAILED;
}
GELOGT(TRACE_RUNNING, "Run Graph Run graph with stream asyn.");
Status ret = instance_ptr->SessionManagerObj().RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs,
outputs);
if (ret != SUCCESS) {
GELOGE(ret, "[Run][Graph]Run graph with stream asyn Failed,"
"error code = %u, session id = %lu, graph id = %u, stream = %p.", ret, sessionId_, graph_id, stream);
REPORT_CALL_ERROR("E19999", "[Run][Graph]Run graph with stream asyn failed, error code = %u, session id = %lu,"
"graph id = %u, stream = %p.", ret, sessionId_, graph_id, stream);
return FAILED;
}

GELOGT(TRACE_STOP, "Session run graph with stream async finished");
return SUCCESS;
}

// Register Call Back
Status Session::RegisterCallBackFunc(const std::string &key, const pCallBackFunc &callback) {
ErrorManager::GetInstance().GenWorkStreamIdDefault();
@@ -640,8 +681,35 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector<InputTensorInfo>
return SUCCESS;
}

// Build Graph
Status Session::BuildGraph(uint32_t graph_id, const std::vector<ge::Tensor> &inputs) {
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id);
std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED,
"[Build][Graph]Failed, the GELib instance is nullptr or is not InitFlag, "
"session_id %lu, graph_id %u", sessionId_, graph_id);
REPORT_INNER_ERROR("E19999",
"Build graph failed, the GELib instance is nullptr or is not InitFlag, "
"session_id %lu, graph_id %u", sessionId_, graph_id);
return FAILED;
}
GELOGT(TRACE_RUNNING, "Building Graph");
Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs);
if (ret != SUCCESS) {
GELOGE(ret,
"[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.",
ret, sessionId_, graph_id);
REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, "
"session_id:%lu, graph_id:%u", ret, sessionId_, graph_id);
return FAILED;
}
return SUCCESS;
}

// Run Graph Asynchronously
Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs,
Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<ge::Tensor> &inputs,
RunAsyncCallback callback) {
ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute);
ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id);
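
A minimal usage sketch of the two Session entry points added above (RunGraphWithStreamAsync and the Tensor-based BuildGraph). Only the two method signatures come from this diff; the header names, the Ascend runtime stream calls, and the surrounding setup are assumptions for illustration.

#include "ge/ge_api.h"    // assumed public header providing ge::Session / ge::Tensor
#include "runtime/rt.h"   // assumed runtime header providing rtStreamCreate and friends

// Hypothetical caller: build a graph once, then run it on a caller-owned stream.
ge::Status RunOnStream(ge::Session &session, uint32_t graph_id,
                       const std::vector<ge::Tensor> &inputs, std::vector<ge::Tensor> &outputs) {
  // New overload from this change: BuildGraph taking ge::Tensor instead of InputTensorInfo.
  if (session.BuildGraph(graph_id, inputs) != ge::SUCCESS) {
    return ge::FAILED;
  }
  rtStream_t stream = nullptr;
  if (rtStreamCreate(&stream, 0) != RT_ERROR_NONE) {  // assumed runtime call
    return ge::FAILED;
  }
  // New API from this change: execution is issued on the given stream without blocking.
  ge::Status ret = session.RunGraphWithStreamAsync(graph_id, stream, inputs, outputs);
  // The caller remains responsible for synchronizing and destroying its stream.
  (void)rtStreamSynchronize(stream);
  (void)rtStreamDestroy(stream);
  return ret;
}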


ge/common/auth/file_saver.cc (+0, -1)

@@ -128,7 +128,6 @@ Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFi

Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFileHeader &file_header,
ModelPartitionTable &model_partition_table,

const std::vector<ModelPartition> &partition_datas) {
GE_CHK_BOOL_RET_STATUS(!partition_datas.empty() && model_partition_table.num != 0
&& model_partition_table.num == partition_datas.size(), FAILED,


ge/common/dump/dump_manager.cc (+71, -45)

@@ -23,6 +23,7 @@ const char *const kDumpOFF = "OFF";
const char *const kDumpoff = "off";
const char *const kDumpOn = "on";
const uint64_t kInferSessionId = 0;
const uint32_t kAllOverflow = 3;
} // namespace
namespace ge {
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpManager &DumpManager::GetInstance() {
@@ -30,78 +31,103 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpManager &DumpManager::GetIn
return instance;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf(const DumpConfig &dump_config) {
DumpProperties dump_properties;
std::string dump_status;
std::string dump_path;
std::string dump_mode;
std::string dump_op_switch;

if (dump_config.dump_status.empty()) {
bool DumpManager::NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump_properties) {
if (dump_config.dump_status.empty() && dump_config.dump_debug.empty()) {
dump_properties_map_.emplace(kInferSessionId, dump_properties);
GELOGI("Dump does not open");
return SUCCESS;
return false;
}

dump_status = dump_config.dump_status;
GELOGI("Dump status is %s", dump_status.c_str());
if (dump_config.dump_status == kDumpoff || dump_config.dump_status == kDumpOFF) {
GELOGI("Dump status is %s, dump debug is %s.", dump_config.dump_status.c_str(), dump_config.dump_debug.c_str());
if ((dump_config.dump_status == kDumpoff || dump_config.dump_status == kDumpOFF) &&
dump_config.dump_debug == kDumpoff) {
dump_properties.ClearDumpPropertyValue();
dump_properties_map_.emplace(kInferSessionId, dump_properties);
return SUCCESS;
return false;
}
if (dump_config.dump_status == kDumpOn && dump_config.dump_debug == kDumpOn) {
GELOGW("Not support coexistence of dump debug and dump status.");
return false;
}
dump_properties.SetDumpStatus(dump_status);
return true;
}

dump_op_switch = dump_config.dump_op_switch;
dump_properties.SetDumpOpSwitch(dump_op_switch);
if (dump_op_switch == kDumpoff && dump_config.dump_list.empty()) {
dump_properties_map_.emplace(kInferSessionId, dump_properties);
GELOGE(PARAM_INVALID, "[Check][DumpList]Invalid, dump_op_switch is %s",
dump_op_switch.c_str());
REPORT_INNER_ERROR("E19999", "Dump list check invalid, dump_op_switch is %s",
dump_op_switch.c_str());
return PARAM_INVALID;
void DumpManager::SetDumpDebugConf(const DumpConfig &dump_config, DumpProperties &dump_properties) {
if (dump_config.dump_debug == kDumpOn) {
GELOGI("Only do overflow detection, dump debug is %s.", dump_config.dump_debug.c_str());
dump_properties.InitInferOpDebug();
dump_properties.SetOpDebugMode(kAllOverflow);
}
}

if (!dump_config.dump_list.empty()) {
for (auto model_dump : dump_config.dump_list) {
std::string model_name = model_dump.model_name;
GELOGI("Dump model is %s", model_name.c_str());
std::set<std::string> dump_layers;
for (auto layer : model_dump.layers) {
GELOGI("Dump layer is %s in model", layer.c_str());
dump_layers.insert(layer);
}
dump_properties.AddPropertyValue(model_name, dump_layers);
void DumpManager::SetDumpList(const DumpConfig &dump_config, DumpProperties &dump_properties) {
for (const auto &model_dump : dump_config.dump_list) {
std::string model_name = model_dump.model_name;
GELOGI("Dump model is %s", model_name.c_str());
std::set<std::string> dump_layers;
for (const auto &layer : model_dump.layers) {
GELOGI("Dump layer is %s in model", layer.c_str());
dump_layers.insert(layer);
}
dump_properties.AddPropertyValue(model_name, dump_layers);
}
}

Status DumpManager::SetNormalDumpConf(const DumpConfig &dump_config, DumpProperties &dump_properties) {
if (dump_config.dump_status == kDumpOn) {
GELOGI("Only do normal dump process, dump status is %s.", dump_config.dump_status.c_str());
dump_properties.SetDumpStatus(dump_config.dump_status);
std::string dump_op_switch = dump_config.dump_op_switch;
dump_properties.SetDumpOpSwitch(dump_op_switch);
if (dump_op_switch == kDumpoff && dump_config.dump_list.empty()) {
dump_properties_map_.emplace(kInferSessionId, dump_properties);
GELOGE(PARAM_INVALID, "[Check][DumpList]Invalid, dump_op_switch is %s", dump_op_switch.c_str());
REPORT_INNER_ERROR("E19999", "Dump list check invalid, dump_op_switch is %s", dump_op_switch.c_str());
return PARAM_INVALID;
}
if (dump_op_switch == kDumpOn) {
GELOGI("Start to dump model and single op,dump op switch is %s", dump_op_switch.c_str());

if (!dump_config.dump_list.empty()) {
if (dump_op_switch == kDumpOn) {
GELOGI("Start to dump model and single op, dump op switch is %s", dump_op_switch.c_str());
} else {
GELOGI("Only dump model, dump op switch is %s", dump_op_switch.c_str());
}
SetDumpList(dump_config, dump_properties);
} else {
GELOGI("Only dump model,dump op switch is %s", dump_op_switch.c_str());
GELOGI("Only dump single op, dump op switch is %s", dump_op_switch.c_str());
}
} else {
GELOGI("Only dump single op,dump op switch is %s", dump_op_switch.c_str());
GELOGI("Dump mode is %s", dump_config.dump_mode.c_str());
dump_properties.SetDumpMode(dump_config.dump_mode);
}
return SUCCESS;
}

dump_path = dump_config.dump_path;
Status DumpManager::SetDumpPath(const DumpConfig &dump_config, DumpProperties &dump_properties) {
std::string dump_path = dump_config.dump_path;
if (dump_path.empty()) {
GELOGE(PARAM_INVALID, "[Check][DumpPath]It is empty");
REPORT_INNER_ERROR("E19999", "Dump path check is empty");
return PARAM_INVALID;
}

if (dump_path[dump_path.size() - 1] != '/') {
dump_path = dump_path + "/";
}
dump_path = dump_path + CurrentTimeInStr() + "/";
GELOGI("Dump path is %s", dump_path.c_str());
dump_properties.SetDumpPath(dump_path);
return SUCCESS;
}

dump_mode = dump_config.dump_mode;
GELOGI("Dump mode is %s", dump_mode.c_str());
dump_properties.SetDumpMode(dump_mode);
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf(const DumpConfig &dump_config) {
DumpProperties dump_properties;
if (!NeedDoDump(dump_config, dump_properties)) {
GELOGD("No need do dump process.");
return SUCCESS;
}
SetDumpDebugConf(dump_config, dump_properties);
GE_CHK_STATUS_RET(SetNormalDumpConf(dump_config, dump_properties), "[Init][DumpConf] failed when dump status is on.");
GE_CHK_STATUS_RET(SetDumpPath(dump_config, dump_properties), "[Init][DumpPath] failed.");
dump_properties_map_[kInferSessionId] = dump_properties;

return SUCCESS;
}
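
A hedged sketch of the configuration flow the refactored SetDumpConf now handles: a DumpConfig with either dump_status or dump_debug set to "on" (the two are mutually exclusive per NeedDoDump). The field names and the SetDumpConf call come from the diff above; the include path, the ModelDumpConfig type name, and the model/layer values are assumptions.

#include "common/dump/dump_manager.h"  // assumed in-tree include path

ge::Status ConfigureDump() {
  ge::DumpConfig dump_config;
  // Path 1: normal dump, handled by SetNormalDumpConf / SetDumpList / SetDumpPath.
  dump_config.dump_status = "on";
  dump_config.dump_op_switch = "on";        // dump single ops as well as model layers
  dump_config.dump_mode = "all";
  dump_config.dump_path = "/tmp/ge_dump";   // SetDumpPath appends '/' and a timestamp dir
  ge::ModelDumpConfig model_dump;           // hypothetical entry type of dump_list
  model_dump.model_name = "resnet50";       // hypothetical model / layer names
  model_dump.layers = {"conv1", "fc1000"};
  dump_config.dump_list.push_back(model_dump);
  // Path 2 (mutually exclusive with path 1, see NeedDoDump): overflow detection only.
  // dump_config.dump_debug = "on";         // SetDumpDebugConf -> InitInferOpDebug + kAllOverflow
  return ge::DumpManager::GetInstance().SetDumpConf(dump_config);
}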



ge/common/dump/dump_manager.h (+5, -0)

@@ -34,6 +34,11 @@ class DumpManager {
void RemoveDumpProperties(uint64_t session_id);

private:
bool NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump_properties);
void SetDumpDebugConf(const DumpConfig &dump_config, DumpProperties &dump_properties);
Status SetDumpPath(const DumpConfig &dump_config, DumpProperties &dump_properties);
Status SetNormalDumpConf(const DumpConfig &dump_config, DumpProperties &dump_properties);
void SetDumpList(const DumpConfig &dump_config, DumpProperties &dump_properties);
std::mutex mutex_;
std::map<uint64_t, DumpProperties> dump_properties_map_;
};


ge/common/dump/dump_properties.cc (+17, -6)

@@ -53,7 +53,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti
dump_path_.clear();
dump_step_.clear();
dump_mode_.clear();
is_op_debug_ = false;
is_train_op_debug_ = false;
is_infer_op_debug_ = false;
op_debug_mode_ = 0;

std::string enable_dump;
@@ -124,7 +125,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpI
dump_mode_.clear();
dump_op_switch_.clear();
dump_status_.clear();
is_op_debug_ = false;
is_train_op_debug_ = false;
is_infer_op_debug_ = false;
op_debug_mode_ = 0;
}

@@ -203,6 +205,14 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperti
return dump_status_;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitInferOpDebug() {
is_infer_op_debug_ = true;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetOpDebugMode(const uint32_t &op_debug_mode) {
op_debug_mode_ = op_debug_mode;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpOpSwitch(
const std::string &dump_op_switch) {
dump_op_switch_ = dump_op_switch;
@@ -237,7 +247,8 @@ void DumpProperties::CopyFrom(const DumpProperties &other) {
dump_op_switch_ = other.dump_op_switch_;

model_dump_properties_map_ = other.model_dump_properties_map_;
is_op_debug_ = other.is_op_debug_;
is_train_op_debug_ = other.is_train_op_debug_;
is_infer_op_debug_ = other.is_infer_op_debug_;
op_debug_mode_ = other.op_debug_mode_;
}
}
@@ -254,15 +265,15 @@ void DumpProperties::SetDumpDebugOptions() {

if (dump_debug_mode == OP_DEBUG_AICORE) {
GELOGD("ge.exec.dumpDebugMode=aicore_overflow, op debug is open.");
is_op_debug_ = true;
is_train_op_debug_ = true;
op_debug_mode_ = kAicoreOverflow;
} else if (dump_debug_mode == OP_DEBUG_ATOMIC) {
GELOGD("ge.exec.dumpDebugMode=atomic_overflow, op debug is open.");
is_op_debug_ = true;
is_train_op_debug_ = true;
op_debug_mode_ = kAtomicOverflow;
} else if (dump_debug_mode == OP_DEBUG_ALL) {
GELOGD("ge.exec.dumpDebugMode=all, op debug is open.");
is_op_debug_ = true;
is_train_op_debug_ = true;
op_debug_mode_ = kAllOverflow;
} else {
GELOGW("ge.exec.dumpDebugMode is invalid.");


ge/common/dump/dump_properties.h (+13, -2)

@@ -65,16 +65,26 @@ class DumpProperties {

const std::string &GetDumpStatus() const;

void InitInferOpDebug();

bool IsInferOpDebug() const {
return is_infer_op_debug_;
}

void SetDumpOpSwitch(const std::string &dump_op_switch);

const std::string &GetDumpOpSwitch() const;

bool IsOpDebugOpen() const { return is_op_debug_; }
bool IsOpDebugOpen() const {
return is_train_op_debug_ || is_infer_op_debug_;
}

bool IsDumpOpen() const;

bool IsSingleOpNeedDump() const;

void SetOpDebugMode(const uint32_t &op_debug_mode);

uint32_t GetOpDebugMode() const { return op_debug_mode_; }

const std::string &GetEnableDump() const {return enable_dump_;}
@@ -96,7 +106,8 @@ class DumpProperties {
std::string dump_op_switch_;
std::map<std::string, std::set<std::string>> model_dump_properties_map_;

bool is_op_debug_ = false;
bool is_train_op_debug_ = false;
bool is_infer_op_debug_ = false;
uint32_t op_debug_mode_ = 0;
};
}
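
A short illustration of the split op-debug flags introduced above: the inference path sets is_infer_op_debug_ via InitInferOpDebug/SetOpDebugMode, the training path sets is_train_op_debug_ from ge.exec.dumpDebugMode, and IsOpDebugOpen now reports either. Method names come from this header; the include path and the surrounding setup are assumptions.

#include "common/dump/dump_properties.h"  // assumed in-tree include path

void IllustrateOpDebugFlags() {
  ge::DumpProperties dump_properties;
  dump_properties.InitInferOpDebug();     // inference overflow-detection path
  dump_properties.SetOpDebugMode(3);      // 3 mirrors kAllOverflow in dump_manager.cc above
  if (dump_properties.IsOpDebugOpen()) {  // true for either train or infer op debug
    uint32_t mode = dump_properties.GetOpDebugMode();  // 3 here
    (void)mode;  // a real consumer would configure overflow dumping with this mode
  }
}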


ge/common/helper/model_helper.cc (+13, -2)

@@ -21,6 +21,7 @@
#include "framework/omg/version.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "framework/omg/omg_inner_types.h"

using std::string;
using domi::ModelTaskDef;
@@ -304,7 +305,6 @@ Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper>& om_f
return FAILED;
}


if (SaveModelTaskDef(om_file_save_helper, ge_model, task_buffer, model_index) != SUCCESS) {
GELOGE(FAILED, "[Save][TaskDef]Failed, model %s, model index %zu",
ge_model->GetName().c_str(), model_index);
@@ -333,6 +333,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmMod
ge::Buffer model_buffer;
ge::Buffer task_buffer;

GE_CHK_BOOL_EXEC(ge::AttrUtils::SetStr(*(ge_model.get()), ATTR_MODEL_ATC_CMDLINE,
domi::GetContext().atc_cmdline),
GELOGE(FAILED, "SetStr for atc_cmdline failed.");
return FAILED);
auto ret = SaveAllModelPartiton(om_file_save_helper, ge_model, model_buffer, task_buffer);
if (ret != SUCCESS) {
GELOGE(ret, "[Save][AllModelPartition]Failed, model %s, error_code %u",
@@ -386,9 +390,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRoo
REPORT_INNER_ERROR("E19999", "GraphBuilder SaveModel received invalid "
"file name prefix");
return FAILED);

if (!is_unknown_shape) {
auto &model_root = name_to_ge_model.begin()->second;
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetStr(*(model_root.get()), ATTR_MODEL_ATC_CMDLINE,
domi::GetContext().atc_cmdline),
GELOGE(FAILED, "SetStr for atc_cmdline failed.");
return FAILED);
return SaveToOmModel(model_root, save_param, output_file, model);
}

@@ -396,6 +403,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRoo
GE_CHECK_NOTNULL(om_file_save_helper);

auto &first_ge_model = name_to_ge_model.at(ge_root_model->GetRootGraph()->GetName());
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetStr(*(first_ge_model.get()), ATTR_MODEL_ATC_CMDLINE,
domi::GetContext().atc_cmdline),
GELOGE(FAILED, "SetStr for atc_cmdline failed.");
return FAILED);

// ge root model must be the first to be loaded
vector<string> model_names{ge_root_model->GetRootGraph()->GetName()};


ge/common/kernel_store.cc (+4, -0)

@@ -38,6 +38,10 @@ bool KernelStore::Build() {
buffer_.resize(total_len);
} catch (std::bad_alloc &e) {
GELOGE(ge::MEMALLOC_FAILED, "All build memory failed, memory size %zu", total_len);
GELOGE(ge::MEMALLOC_FAILED, "[Malloc][Memmory]Resize buffer failed, memory size %zu, "
"exception %s", total_len, e.what());
REPORT_CALL_ERROR("E19999", "Resize buffer failed, memory size %zu, exception %s",
total_len, e.what());
return false;
}



ge/common/model_parser/model_parser.cc (+15, -4)

@@ -31,18 +31,24 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
ge::ModelData &model_data) {
std::string real_path = RealPath(model_path);
if (real_path.empty()) {
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Model file path '%s' is invalid", model_path);
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Check][Param]Model file path %s is invalid",
model_path);
REPORT_CALL_ERROR("E19999", "Model file path %s is invalid", model_path);
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

if (GetFileLength(model_path) == -1) {
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "File size not valid, file: %s.", model_path);
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Check][Param]File size not valid, file %s",
model_path);
REPORT_INNER_ERROR("E19999", "File size not valid, file %s", model_path);
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

std::ifstream fs(real_path.c_str(), std::ifstream::binary);
if (!fs.is_open()) {
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Open file: %s failed, error: %s", model_path, strerror(errno));
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Open][File]Failed, file %s, error %s",
model_path, strerror(errno));
REPORT_CALL_ERROR("E19999", "Open file %s failed, error %s", model_path, strerror(errno));
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

@@ -57,6 +63,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
char *data = new (std::nothrow) char[len];
if (data == nullptr) {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Load model From file failed, bad memory allocation occur. (need:%u)", len);
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Load][ModelFromFile]Failed, "
"bad memory allocation occur(need %u), file %s", len, model_path);
REPORT_CALL_ERROR("E19999", "Load model from file %s failed, "
"bad memory allocation occur(need %u)", model_path, len);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

@@ -105,7 +115,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
model_len = file_header->length;
GELOGD("Model_len is %u, model_file_head_len is %zu.", model_len, sizeof(ModelFileHeader));
} else {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Invalid model. ModelEncryptType not supported.");
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Param]Invalid, model encrypt type not supported");
REPORT_CALL_ERROR("E19999","Invalid model, encrypt type not supported");
res = ACL_ERROR_GE_PARAM_INVALID;
}



ge/common/model_saver.cc (+10, -6)

@@ -33,7 +33,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
const Json &model) {
Status ret = SUCCESS;
if (file_path == nullptr || SUCCESS != CheckPath(file_path)) {
GELOGE(FAILED, "Check output file failed.");
GELOGE(FAILED, "[Check][OutputFile]Failed, file %s", file_path);
REPORT_CALL_ERROR("E19999", "Output file %s check invalid", file_path);
return FAILED;
}
std::string model_str;
@@ -41,11 +42,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
model_str = model.dump(kInteval, ' ', false, Json::error_handler_t::ignore);
} catch (std::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E19007", {"exception"}, {e.what()});
GELOGE(FAILED, "Failed to convert JSON to string, reason: %s.", e.what());
GELOGE(FAILED, "[Convert][File]Failed to convert JSON to string, file %s, reason %s",
file_path, e.what());
return FAILED;
} catch (...) {
ErrorManager::GetInstance().ATCReportErrMessage("E19008");
GELOGE(FAILED, "Failed to convert JSON to string.");
GELOGE(FAILED, "[Convert][File]Failed to convert JSON to string, file %s", file_path);
return FAILED;
}

@@ -59,7 +61,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
int32_t fd = mmOpen2(real_path, M_RDWR | M_CREAT | O_TRUNC, mode);
if (fd == EN_ERROR || fd == EN_INVALID_PARAM) {
ErrorManager::GetInstance().ATCReportErrMessage("E19001", {"file", "errmsg"}, {file_path, strerror(errno)});
GELOGE(FAILED, "Open file[%s] failed. errmsg:%s", file_path, strerror(errno));
GELOGE(FAILED, "[Open][File]Failed, file %s, errmsg %s", file_path, strerror(errno));
return FAILED;
}
const char *model_char = model_str.c_str();
@@ -70,12 +72,14 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
ErrorManager::GetInstance().ATCReportErrMessage(
"E19004", {"file", "errmsg"}, {file_path, strerror(errno)});
// Need to both print the error info of mmWrite and mmClose, so return ret after mmClose
GELOGE(FAILED, "Write to file failed. errno:%ld, errmsg:%s", mmpa_ret, strerror(errno));
GELOGE(FAILED, "[Write][Data]To file %s failed. errno %ld, errmsg %s",
file_path, mmpa_ret, strerror(errno));
ret = FAILED;
}
// Close file
if (mmClose(fd) != EN_OK) {
GELOGE(FAILED, "Close file failed. errmsg:%s", strerror(errno));
GELOGE(FAILED, "[Close][File]Failed, file %s, errmsg %s", file_path, strerror(errno));
REPORT_CALL_ERROR("E19999", "Close file %s failed, errmsg %s", file_path, strerror(errno));
ret = FAILED;
}
return ret;


ge/common/op/ge_op_utils.cc (+20, -7)

@@ -62,6 +62,10 @@ const uint32_t SWITCH_TRUE_OUTPUT = 1;
const uint32_t SWITCH_DATA_INPUT = 0;
const uint32_t SWITCH_PRED_INPUT = 1;

// Merge
const uint32_t MERGE_DATA_OUTPUT = 0;
const uint32_t MERGE_INDEX_OUTPUT = 1;

// FunctionOp
const uint32_t IF_COND_INPUT = 0;
const uint32_t FOR_START_INPUT = 0;
@@ -239,7 +243,8 @@ Status OpUtils::SetDataByDataType(size_t out_size, const std::vector<char *> &ch
const std::vector<char *> &chunk_output, GeTensor *output) {
unique_ptr<T[]> output_data(new (std::nothrow) T[out_size]());
if (output_data == nullptr) {
GELOGE(MEMALLOC_FAILED, "New buf failed");
GELOGE(MEMALLOC_FAILED, "[Malloc][Data]New buf failed");
REPORT_CALL_ERROR("E19999", "New buf failed");
return INTERNAL_ERROR;
}

@@ -275,7 +280,8 @@ Status OpUtils::SetOutputSliceDataByDataType(void *data, int64_t data_size, cons
int64_t dim_i = input_dims[i];
int64_t stride_i = stride[i];
if (dim_i == 0) {
GELOGE(PARAM_INVALID, "Dim_i of size tensor can't be 0.");
GELOGE(PARAM_INVALID, "[Check][Param]Invalid, Dim_i of size tensor is 0");
REPORT_INNER_ERROR("E19999", "Dim_i of size tensor is 0, invalid");
return PARAM_INVALID;
}
chunk_size = chunk_size / dim_i;
@@ -299,7 +305,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetOutputSliceD
void *data, int64_t data_size, int32_t data_type, std::vector<int64_t> &input_dims, std::vector<int64_t> &begin,
std::vector<int64_t> &output_dims, GeTensor *output, std::vector<int64_t> &stride) {
if (data == nullptr || output == nullptr) {
GELOGE(PARAM_INVALID, "Input param is nullptr.");
GELOGE(PARAM_INVALID, "[Check][Param]Input param is nullptr");
REPORT_INNER_ERROR("E19999", "Input param is nullptr");
return PARAM_INVALID;
}

@@ -436,14 +443,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetWeights(ge::
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status
OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector<int64_t> &dims) {
if (tensor == nullptr) {
GELOGE(PARAM_INVALID, "Input tensor is nullptr");
GELOGE(PARAM_INVALID, "[Check][Param]Input tensor is nullptr");
REPORT_INNER_ERROR("E19999","Input tensor is nullptr");
return PARAM_INVALID;
}

// If the tensor data is a vector, the shape dimension must be 1
if (tensor->GetTensorDesc().GetShape().GetDims().size() > 1) {
GELOGE(PARAM_INVALID, "The dimension of the input tensor shape cannot be more than 1, it is %zu",
GELOGE(PARAM_INVALID, "[Check][Param]The dimension of the input tensor shape "
"cannot be more than 1, it is %zu",
tensor->GetTensorDesc().GetShape().GetDims().size());
REPORT_CALL_ERROR("E19999", "The dimension of the input tensor shape %zu invalid, "
"more than 1", tensor->GetTensorDesc().GetShape().GetDims().size());
return PARAM_INVALID;
}

@@ -462,8 +473,10 @@ OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType ty
dims.push_back(shape_data[i]);
}
} else {
GELOGE(PARAM_INVALID, "Data type only can be DT_INT32 or DT_INT64. type is %s",
TypeUtils::DataTypeToSerialString(type).c_str());
GELOGE(PARAM_INVALID, "[Check][DataType]Invalid, type only can be DT_INT32 or DT_INT64, "
"type is %s", TypeUtils::DataTypeToSerialString(type).c_str());
REPORT_INNER_ERROR("E19999", "Data type %s check invalid, only can be DT_INT32 or DT_INT64",
TypeUtils::DataTypeToSerialString(type).c_str());
return PARAM_INVALID;
}



ge/common/profiling/ge_profiling.cc (+34, -14)

@@ -67,11 +67,13 @@ bool TransProfConfigToParam(const ProfCommandHandleData &profCommand, vector<str

bool isProfConfigValid(const uint32_t *deviceid_list, uint32_t device_nums) {
if (deviceid_list == nullptr) {
GELOGE(ge::PARAM_INVALID, "deviceIdList is nullptr");
GELOGE(ge::PARAM_INVALID, "[Check][DeviceIDList]Invalid, it is nullptr");
REPORT_INNER_ERROR("E19999", "Device id list is nullptr");
return false;
}
if (device_nums == 0 || device_nums > MAX_DEV_NUM) {
GELOGE(ge::PARAM_INVALID, "The device nums: %u is invalid.", device_nums);
GELOGE(ge::PARAM_INVALID, "[Check][DeviceNums]Invalid, device nums: %u", device_nums);
REPORT_INNER_ERROR("E19999", "DeviceNums %u check invalid", device_nums);
return false;
}

@@ -79,12 +81,16 @@ bool isProfConfigValid(const uint32_t *deviceid_list, uint32_t device_nums) {
int32_t dev_count = 0;
rtError_t rt_err = rtGetDeviceCount(&dev_count);
if (rt_err != RT_ERROR_NONE) {
GELOGE(ge::INTERNAL_ERROR, "Get the Device count fail.");
GELOGE(ge::INTERNAL_ERROR, "[Get][DeviceCount]Failed, error_code %d", rt_err);
REPORT_CALL_ERROR("E19999", "Get device count failed, error_code %d", rt_err);
return false;
}

if (device_nums > static_cast<uint32_t>(dev_count)) {
GELOGE(ge::PARAM_INVALID, "Device num(%u) is not in range 1 ~ %d.", device_nums, dev_count);
GELOGE(ge::PARAM_INVALID, "[Check][Param]Device num %u is not in range [1,%d]",
device_nums, dev_count);
REPORT_INNER_ERROR("E19999", "Device num %u check invalid, it is not in range [1,%d]",
device_nums, dev_count);
return false;
}

@@ -92,11 +98,14 @@ bool isProfConfigValid(const uint32_t *deviceid_list, uint32_t device_nums) {
for (size_t i = 0; i < device_nums; ++i) {
uint32_t dev_id = deviceid_list[i];
if (dev_id >= static_cast<uint32_t>(dev_count)) {
GELOGE(ge::PARAM_INVALID, "Device id %u is not in range 0 ~ %d(exclude %d)", dev_id, dev_count, dev_count);
GELOGE(ge::PARAM_INVALID, "[Check][DeviceId]Device id %u is not in range [0,%d)",
dev_id, dev_count);
REPORT_CALL_ERROR("E19999", "Device id %u is not in range [0,%d)", dev_id, dev_count);
return false;
}
if (record.count(dev_id) > 0) {
GELOGE(ge::PARAM_INVALID, "Device id %u is duplicatedly set", dev_id);
GELOGE(ge::PARAM_INVALID, "[Check][DeviceId]Device id %u is duplicatedly set", dev_id);
REPORT_CALL_ERROR("E19999", "Device id %u is not unique, duplicatedly set", dev_id);
return false;
}
record.insert(dev_id);
@@ -106,7 +115,8 @@ bool isProfConfigValid(const uint32_t *deviceid_list, uint32_t device_nums) {

ge::Status RegProfCtrlCallback(MsprofCtrlCallback func) {
if (func == nullptr) {
GELOGE(ge::PARAM_INVALID, "Msprof ctrl callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]Msprof ctrl callback is nullptr");
REPORT_INNER_ERROR("E19999", "Msprof ctrl callback is nullptr");
return ge::PARAM_INVALID;
}
if (ge::ProfilingManager::Instance().GetMsprofCallback().msprofCtrlCallback != nullptr) {
@@ -119,13 +129,15 @@ ge::Status RegProfCtrlCallback(MsprofCtrlCallback func) {

ge::Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func) {
if (func == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofSetDeviceCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofSetDeviceCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofSetDeviceCallback callback is nullptr");
return ge::PARAM_INVALID;
}
// Pass MsprofSetDeviceCallback to runtime
ge::Status rt_ret = rtRegDeviceStateCallback(kRtSetDeviceRegName.c_str(), static_cast<rtDeviceStateCallback>(func));
if (rt_ret != ge::SUCCESS) {
GELOGE(rt_ret, "Pass MsprofSetDeviceCallback to runtime failed!");
GELOGE(rt_ret, "[Pass][MsprofSetDeviceCallback]To runtime failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Pass MsprofSetDeviceCallback to runtime failed, ret 0x%X", rt_ret);
return rt_ret;
}
return ge::SUCCESS;
@@ -133,7 +145,8 @@ ge::Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func) {

ge::Status RegProfReporterCallback(MsprofReporterCallback func) {
if (func == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofReporterCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr");
return ge::PARAM_INVALID;
}
if (ge::ProfilingManager::Instance().GetMsprofCallback().msprofReporterCallback != nullptr) {
@@ -144,7 +157,10 @@ ge::Status RegProfReporterCallback(MsprofReporterCallback func) {
// Pass MsprofReporterCallback to runtime
ge::Status rt_ret = rtSetMsprofReporterCallback(func);
if (rt_ret != ge::SUCCESS) {
GELOGE(rt_ret, "Pass MsprofReporterCallback to runtime failed!!");
GELOGE(rt_ret, "[Pass][Param]Pass MsprofReporterCallback to runtime failed, error_code %u",
rt_ret);
REPORT_CALL_ERROR("E19999", "Pass MsprofReporterCallback to runtime failed, error_code %u",
rt_ret);
return rt_ret;
}
// Pass MsprofReporterCallback to hccl
@@ -167,9 +183,10 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
if (!isProfConfigValid(prof_config_param->devIdList, prof_config_param->devNums)) {
return ge::FAILED;
}
if (!TransProfConfigToParam(*prof_config_param, prof_params)) {
GELOGE(ge::PARAM_INVALID, "Transfer profilerConfig to string vector failed");
GELOGE(ge::PARAM_INVALID, "[Check][Param]Transfer profilerConfig to string vector failed");
REPORT_CALL_ERROR("E19999", "Transfer profilerConfig to string vector failed");
return ge::PARAM_INVALID;
}
}
@@ -188,7 +205,10 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
}
ge::Status ret = graph_loader.CommandHandle(command);
if (ret != ge::SUCCESS) {
GELOGE(ret, "Handle profiling command failed");
GELOGE(ret, "[Handle][Command]Handle profiling command failed, command type %s, error_code %u",
iter->second.c_str(), ret);
REPORT_CALL_ERROR("E19999", "Handle profiling command failed, command type %s, error_code %u",
iter->second.c_str(), ret);
return ge::FAILED;
}



ge/common/profiling/profiling_manager.cc (+136, -51)

@@ -87,21 +87,26 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ge::Status ProfilingManager::In
struct MsprofGeOptions prof_conf = {{ 0 }};
Status ret = InitFromOptions(options, prof_conf);
if (ret != SUCCESS) {
GELOGE(ret, "Failed to init profiling.");
GELOGE(ret, "[Init][Profiling]Failed, error_code %u", ret);
REPORT_CALL_ERROR("E19999", "Init profiling failed, error_code %u", ret);
return ret;
}

if (is_execute_profiling_) {
if (prof_cb_.msprofCtrlCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofCtrlCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofCtrlCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofCtrlCallback callback is nullptr");
return ge::PARAM_INVALID;
}
int32_t cb_ret = prof_cb_.msprofCtrlCallback(
static_cast<uint32_t>(MsprofCtrlCallbackType::MSPROF_CTRL_INIT_GE_OPTIONS),
static_cast<void *>(&prof_conf), sizeof(MsprofGeOptions));
if (cb_ret != 0) {
GELOGE(FAILED, "Call msprofCtrlCallback failed, type:%u, return:%d",
GELOGE(FAILED, "[Call][msprofCtrlCallback]Failed, type %u, return %d",
static_cast<uint32_t>(MsprofCtrlCallbackType::MSPROF_CTRL_INIT_GE_OPTIONS), cb_ret);
REPORT_CALL_ERROR("E19999", "Call msprofCtrlCallback failed, type %u, return %d",
static_cast<uint32_t>(MsprofCtrlCallbackType::MSPROF_CTRL_INIT_GE_OPTIONS),
cb_ret);
return FAILED;
}
GELOGI("Profiling init success");
@@ -122,7 +127,10 @@ ge::Status ProfilingManager::InitFromOptions(const Options &options, MsprofGeOpt
// enable profiling by ge option
if (strncpy_s(prof_conf.options, MSPROF_OPTIONS_DEF_LEN_MAX, options.profiling_options.c_str(),
MSPROF_OPTIONS_DEF_LEN_MAX - 1) != EOK) {
GELOGE(INTERNAL_ERROR, "copy profiling_options failed.");
GELOGE(INTERNAL_ERROR, "[copy][ProfilingOptions]Failed, options %s",
options.profiling_options.c_str());
REPORT_CALL_ERROR("E19999", "Copy profiling_options %s failed",
options.profiling_options.c_str());
return INTERNAL_ERROR;
}
is_execute_profiling_ = true;
@@ -147,13 +155,17 @@ ge::Status ProfilingManager::InitFromOptions(const Options &options, MsprofGeOpt
// Parse json str for bp fp
Status ret = ParseOptions(prof_conf.options);
if (ret != ge::SUCCESS) {
GELOGE(ge::PARAM_INVALID, "Parse training trace param failed.");
GELOGE(ge::PARAM_INVALID, "[Parse][Options]Parse training trace param %s failed, error_code %u",
prof_conf.options, ret);
REPORT_CALL_ERROR("E19999", "Parse training trace param %s failed, error_code %u",
prof_conf.options, ret);
return ge::PARAM_INVALID;
}

if (strncpy_s(prof_conf.jobId, MSPROF_OPTIONS_DEF_LEN_MAX, options.job_id.c_str(), MSPROF_OPTIONS_DEF_LEN_MAX - 1) !=
EOK) {
GELOGE(INTERNAL_ERROR, "copy job_id failed.");
GELOGE(INTERNAL_ERROR, "[Copy][JobId]Failed, original job_id %s", options.job_id.c_str());
REPORT_CALL_ERROR("E19999", "Copy job_id %s failed", options.job_id.c_str());
return INTERNAL_ERROR;
}
GELOGI("Job id: %s, original job id: %s.", prof_conf.jobId, options.job_id.c_str());
@@ -163,7 +175,8 @@ ge::Status ProfilingManager::InitFromOptions(const Options &options, MsprofGeOpt

ge::Status ProfilingManager::ParseOptions(const std::string &options) {
if (options.empty()) {
GELOGE(ge::PARAM_INVALID, "Profiling options is empty.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]Profiling options is empty");
REPORT_INNER_ERROR("E19999", "Profiling options is empty");
return ge::PARAM_INVALID;
}
try {
@@ -178,7 +191,9 @@ ge::Status ProfilingManager::ParseOptions(const std::string &options) {
}
GELOGI("GE profiling training trace:%s", training_trace.c_str());
if (training_trace != "on") {
GELOGE(ge::PARAM_INVALID, "Training trace param:%s is invalid.", training_trace.c_str());
GELOGE(ge::PARAM_INVALID, "[Check][Param]Training trace param:%s is invalid.",
training_trace.c_str());
REPORT_INNER_ERROR("E19999", "Training trace param:%s is invalid.", training_trace.c_str());
return ge::PARAM_INVALID;
}
fp_point_ = prof_options[kFpPoint];
@@ -188,7 +203,8 @@ ge::Status ProfilingManager::ParseOptions(const std::string &options) {
}
is_training_trace_ = true;
} catch (...) {
GELOGE(FAILED, "Json prof_conf options is invalid.");
GELOGE(FAILED, "[Check][Param]Json prof_conf options is invalid");
REPORT_INNER_ERROR("E19999", "Json prof_conf options is invalid");
return ge::PARAM_INVALID;
}
return ge::SUCCESS;
@@ -202,7 +218,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProf
if (device_num != 0) {
auto device_id_ptr = std::unique_ptr<uint32_t[]>(new (std::nothrow) uint32_t[device_num]);
if (device_id_ptr == nullptr) {
GELOGE(FAILED, "Stop profiling: device id ptr is null.");
GELOGE(FAILED, "[Stop][Profiling]Device id ptr is null.");
REPORT_INNER_ERROR("E19999", "Stop profiling, device id ptr is null");
return;
}
for (int32_t i = 0; i < device_num; i++) {
@@ -216,7 +233,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProf

// stop profiling
if (prof_cb_.msprofCtrlCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofCtrlCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofCtrlCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofCtrlCallback callback is nullptr");
return;
}
int32_t cb_ret = prof_cb_.msprofCtrlCallback(static_cast<uint32_t>(MsprofCtrlCallbackType::MSPROF_CTRL_FINALIZE),
@@ -278,10 +296,14 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::Profilin
try {
reported_data = task_info.dump(kInteval, ' ', false, Json::error_handler_t::ignore);
} catch (std::exception &e) {
GELOGE(FAILED, "Failed to convert JSON to string, reason: %s.", e.what());
GELOGE(FAILED, "[Convert][ReportData]Failed to convert json to string, reason %s.",
e.what());
REPORT_CALL_ERROR("E19999", "Failed to convert reported_data from json to string, reason %s",
e.what());
return ;
} catch (...) {
GELOGE(FAILED, "Failed to convert JSON to string.");
GELOGE(FAILED, "[Convert][ReportedData]Failed to convert JSON to string");
REPORT_CALL_ERROR("E19999", "Failed to convert reported data from json to string");
return;
}
reported_data.append(",")
@@ -300,7 +322,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Profil
index_id, model_id, tag_id);
rt_ret = rtProfilerTraceEx(index_id, model_id, tag_id, stream);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[Call][rtProfilerTraceEx] failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][rtProfilerTraceEx]Failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtProfilerTraceEx failed, ret 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("Profiling Step Info TraceTask execute async success, index_id = %lu, model_id = %lu, tag_id = %u",
@@ -314,7 +337,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Profil
uint32_t stream_id = 0;
rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[Get][RtsInfo] task_id and stream_id failed, ret: 0x%X.", rt_ret);
GELOGE(RT_FAILED, "[Get][RtsInfo]Task_id and stream_id failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Get task_id and stream_id failed, ret 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("Get profiling args, task_id[%u], stream_id[%u]", task_id, stream_id);
@@ -333,8 +357,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Profil
reported_data = step_info.dump(kInteval, ' ', false, Json::error_handler_t::ignore);
} catch (std::exception &e) {
GELOGE(FAILED, "Failed to convert JSON to string, reason: %s.", e.what());
GELOGE(FAILED, "[Convert][ReportedData]Failed to convert from json to string, reason: %s",
e.what());
REPORT_CALL_ERROR("E19999", "Failed to convert reported data from json to string, reason: %s",
e.what());
} catch (...) {
GELOGE(FAILED, "Failed to convert JSON to string.");
GELOGE(FAILED, "[Convert][ReportedData]Failed to convert from json to string");
REPORT_CALL_ERROR("E19999", "Failed to convert reported data from json to string");
}
reported_data.append(",")
.append("\n");
@@ -390,7 +419,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportPr
int32_t logic_device_id = 0;
rtError_t rt_ret = rtGetDevice(&logic_device_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "runtime get logic_device_id failed, current logic_device_id:%d", logic_device_id);
GELOGE(rt_ret, "[Get][LogicDeviceId]Failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Get logic device id failed, ret 0x%X", rt_ret);
return;
}
GELOGD("current logic_device_id:%d", logic_device_id);
@@ -452,7 +482,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo
// register Framework to profiling
int32_t cb_ret = PluginInit();
if (cb_ret != 0) {
GELOGE(cb_ret, "profiling plugin init failed, ret:%d", cb_ret);
GELOGE(cb_ret, "[Init][ProfilingPlugin]Failed, ret %d", cb_ret);
REPORT_CALL_ERROR("E19999", "Init profiling plugin failed, ret %d", cb_ret);
return cb_ret;
}
GELOGI("Prof subscribe: model load profiling on.");
@@ -465,7 +496,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo
device[0] = davinci_model->GetDeviceId();
rtError_t rt_ret = rtProfilerStart(module, device_num, device);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler start failed.");
GELOGE(FAILED, "[Start][Profiler]Malloc buffer failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when start profiling, ret 0x%X", rt_ret);
return FAILED;
}
UpdateSubscribeDeviceModuleMap(kProfModelSubscribe, device[0], module);
@@ -473,7 +505,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo
// Report profiling data
Status p_ret = davinci_model->ReportProfilingData();
if (p_ret != SUCCESS) {
GELOGE(p_ret, "Report profiling data failed.");
GELOGE(p_ret, "[Report][ProfilingData]Failed, ret %u", p_ret);
REPORT_CALL_ERROR("E19999", "Report profiling data failed, ret %u", p_ret);
return p_ret;
}
#endif
@@ -499,13 +532,17 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo
// The same device_id, only stop at last time
rtError_t rt_ret = rtProfilerStop(subs_dev_module_[device[0]].module, dev_num, device);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler stop failed.");
GELOGE(FAILED, "[Stop][Profiler]Malloc buffer Failed, ret %d", rt_ret);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when stop profiling, ret %d", rt_ret);
return FAILED;
}
}
UpdateSubscribeDeviceModuleMap(kProfModelUnsubscribe, device[0], subs_dev_module_[device[0]].module);
} else {
GELOGE(FAILED, "The device_id:%u has not been subscribed, do not need to cancel.", device[0]);
GELOGE(FAILED, "[Cancel][DeviceId]The device_id %u has not been subscribed, "
"do not need to cancel", device[0]);
REPORT_CALL_ERROR("E19999", "The device_id %u has not been subscribed, do not need to cancel",
device[0]);
return FAILED;
}

@@ -527,14 +564,16 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfIn
// register Framework to profiling
int32_t cb_ret = PluginInit();
if (cb_ret != 0) {
GELOGE(cb_ret, "profiling plugin init failed, ret:%d", cb_ret);
GELOGE(cb_ret, "[Init][ProfilingPlugin]Failed, ret %d", cb_ret);
REPORT_CALL_ERROR("E19999", "Init profiling plugin failed, ret %d", cb_ret);
return cb_ret;
}

int32_t device_num = -1;
rtError_t rt_ret = rtProfilerStart(model_load_mask, device_num, nullptr);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler start failed.");
GELOGE(FAILED, "[Start][Profiler]Malloc buffer failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when start profiling, ret 0x%X", rt_ret);
return FAILED;
}
is_load_profiling_ = true;
@@ -563,7 +602,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi
int32_t dev_num = -1;
rtError_t rt_ret = rtProfilerStop(PROF_MODEL_LOAD_MASK, dev_num, nullptr);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler stop failed.");
GELOGE(FAILED, "[Stop][Profiler]Malloc buffer failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when stop profiling, ret 0x%X", rt_ret);
return FAILED;
}
for (auto device_id_module : device_id_module_map_) {
@@ -572,7 +612,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi
GELOGI("Prof finalize: device_id: %u, module: 0x%lx.", device_id, device_id_module.second);
rt_ret = rtProfilerStop(device_id_module.second, 1, &device_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler stop failed.");
GELOGE(FAILED, "[Stop][Profiler]Failed, device_id %d, ret 0x%X", device_id, rt_ret);
REPORT_CALL_ERROR("E19999", "Stop runtime profiler failed, device_id %d, ret 0x%X",
                        device_id, rt_ret);
return FAILED;
}
}
@@ -611,18 +653,26 @@ Status ProfilingManager::ProfParseDeviceId(const std::map<std::string, std::stri
int32_t dev_id = std::stoi(decvice_id[i]);
device_list.push_back(dev_id);
} catch (std::invalid_argument &) {
GELOGE(FAILED, "Device id: %s is invalid.", decvice_id[i].c_str());
GELOGE(FAILED, "[Parse][DeviceId]Failed, it is invalid, %s", decvice_id[i].c_str());
REPORT_CALL_ERROR("E19999", "Parse device id %s failed, it is invalid",
decvice_id[i].c_str());
return FAILED;
} catch (std::out_of_range &) {
GELOGE(FAILED, "Device id: %s is out of range.", decvice_id[i].c_str());
GELOGE(FAILED, "[Parse][DeviceId]Failed, it is out of range, %s", decvice_id[i].c_str());
REPORT_CALL_ERROR("E19999", "Parse device id %s failed, it is out of range",
decvice_id[i].c_str());
return FAILED;
} catch (...) {
GELOGE(FAILED, "Device id: %s cannot change to int.", decvice_id[i].c_str());
GELOGE(FAILED, "[Parse][DeviceId]Faield, it cannot change to int, %s",
decvice_id[i].c_str());
REPORT_CALL_ERROR("E19999", "Parse device id %s failed, it cannot change to int",
decvice_id[i].c_str());
return FAILED;
}
}
} else {
GELOGE(FAILED, "Config para not contain device id list.");
GELOGE(FAILED, "[Parse][DeviceId]Config para not contain device id list");
REPORT_CALL_ERROR("E19999", "Parse device id failed, config para not contain device id list");
return FAILED;
}
#endif
@@ -638,27 +688,41 @@ Status ProfilingManager::ProfParseParam(const std::map<std::string, std::string>
try {
device_num = std::stoi(iter->second);
} catch (std::invalid_argument &) {
GELOGE(FAILED, "Device nun: %s is invalid.", iter->second.c_str());
GELOGE(FAILED, "[Parse][Param]Failed, device num %s is invalid", iter->second.c_str());
REPORT_CALL_ERROR("E19999", "Parse param failed, device num %s is invalid",
iter->second.c_str());
return FAILED;
} catch (std::out_of_range &) {
GELOGE(FAILED, "Device num: %s is out of range.", iter->second.c_str());
GELOGE(FAILED, "[Parse][Param]Failed, device num %s cannot change to int",
iter->second.c_str());
REPORT_CALL_ERROR("E19999", "Parse param failed, device num %s cannot change to int",
iter->second.c_str());
return FAILED;
} catch (...) {
GELOGE(FAILED, "Device num: %s cannot change to int.", iter->second.c_str());
GELOGE(FAILED, "[Parse][Param]Failed, device num %s cannot change to int",
iter->second.c_str());
REPORT_CALL_ERROR("E19999", "Parse param failed, device num %s cannot change to int",
iter->second.c_str());
return FAILED;
}
} else {
GELOGE(FAILED, "Config para not contain device num.");
GELOGE(FAILED, "[Parse][Param]Config para not contain device num %s", iter->second.c_str());
REPORT_CALL_ERROR("E19999", "Parse param failed, config para not contain device num %s",
iter->second.c_str());
return FAILED;
}
// device id
if (ProfParseDeviceId(config_para, device_list) != SUCCESS) {
GELOGE(FAILED, "Parse config para device id failed.");
GELOGE(FAILED, "[Parse][DeviceId]Failed");
REPORT_CALL_ERROR("E19999", "Parse device id failed");
return FAILED;
}

if (device_num == 0 || device_num > kMaxDeviceNum || device_num != static_cast<int32_t>(device_list.size())) {
GELOGE(FAILED, "Config para device num: %d not equal to device list size: %zu.", device_num, device_list.size());
GELOGE(FAILED, "[Parse][Param]Failed, config para device num %d not equal to "
"device list size %zu", device_num, device_list.size());
REPORT_INNER_ERROR("E19999", "[Parse][Param]Failed, config para device num %d "
"not equal to device list size %zu", device_num, device_list.size());
return FAILED;
}
#endif
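The device-num and device-id hunks above both convert option strings with std::stoi and translate the three possible failure modes into paired log/report calls. A standalone sketch of that conversion pattern; the function name ParseDeviceNum and the messages are chosen only for illustration:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

// Minimal sketch of the std::stoi pattern used above: std::invalid_argument
// covers non-numeric text, std::out_of_range covers values that do not fit
// into int, and a final catch-all guards against anything else.
bool ParseDeviceNum(const std::string &text, int32_t &device_num) {
  try {
    device_num = static_cast<int32_t>(std::stoi(text));
  } catch (const std::invalid_argument &) {
    std::cerr << "device num '" << text << "' is invalid" << std::endl;
    return false;
  } catch (const std::out_of_range &) {
    std::cerr << "device num '" << text << "' is out of range" << std::endl;
    return false;
  } catch (...) {
    std::cerr << "device num '" << text << "' cannot be converted to int" << std::endl;
    return false;
  }
  return true;
}

int main() {
  int32_t num = 0;
  std::cout << ParseDeviceNum("4", num) << " " << num << std::endl;  // prints "1 4"
  std::cout << ParseDeviceNum("abc", num) << std::endl;              // prints "0"
  return 0;
}
```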
@@ -676,13 +740,19 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt
int32_t device_num = 0;
vector<int32_t> device_list;
if (ProfParseParam(config_para, device_num, device_list) != SUCCESS) {
GELOGE(FAILED, "Prof start parse param failed.");
GELOGE(FAILED, "[Parse][Param]Prof start parse param failed, device num %d, "
"device list size %zu", device_num, device_list.size());
REPORT_CALL_ERROR("E19999", "Prof start parse param failed, device num %d, "
"device list size %zu", device_num, device_list.size());
return FAILED;
}

auto device_id_ptr = std::unique_ptr<uint32_t[]>(new (std::nothrow) uint32_t[device_num]);
if (device_id_ptr == nullptr) {
GELOGE(FAILED, "Prof start: device id ptr is null.");
GELOGE(FAILED, "[Start][Profiling]Malloc buffer failed when start profiling, device num %d",
device_num);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when start profiling, device num %d",
device_num);
return FAILED;
}
for (int32_t i = 0; i < device_num; i++) {
@@ -692,7 +762,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt

rtError_t rt_ret = rtProfilerStart(module, device_num, device_id_ptr.get());
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler config proc failed.");
GELOGE(FAILED, "[Start][Profiler]Runtime profiler config proc failed, config param 0x%lx, "
"device num %d, ret 0x%X", module, device_num, rt_ret);
REPORT_CALL_ERROR("E19999", "Runtime profiler config proc failed, config param 0x%lx, "
"device num %d, ret 0x%X", module, device_num, rt_ret);
return FAILED;
}
if ((module & PROF_MODEL_EXECUTE_MASK) == PROF_MODEL_EXECUTE_MASK) {
@@ -719,12 +792,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt
int32_t device_num = 0;
vector<int32_t> device_list;
if (ProfParseParam(config_para, device_num, device_list) != SUCCESS) {
GELOGE(FAILED, "Prof stop parse param failed.");
GELOGE(FAILED, "[Stop][Profiling]Prof stop parse param failed, device num %d, "
"device list size %zu", device_num, device_list.size());
REPORT_CALL_ERROR("E19999", "Prof stop parse param failed, device num %d, device list size %zu",
device_num, device_list.size());
return FAILED;
}
auto device_id_ptr = std::unique_ptr<uint32_t[]>(new (std::nothrow) uint32_t[device_num]);
if (device_id_ptr == nullptr) {
GELOGE(FAILED, "Prof stop: device id ptr is null.");
GELOGE(FAILED, "[Stop][Profiling]Malloc buffer failed when stop profiling, device num %d",
device_num);
REPORT_CALL_ERROR("E19999", "Malloc buffer failed when stop profiling, device num %d",
device_num);
return FAILED;
}
for (int32_t i = 0; i < device_num; i++) {
@@ -733,7 +812,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt
GELOGI("Prof stop: runtime config param: 0x%lx, device num: %d", module, device_num);
rtError_t rt_ret = rtProfilerStop(module, device_num, device_id_ptr.get());
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Prof stop: runtime profiler config proc failed.");
GELOGE(FAILED, "[Stop][Profiler]Runtime profiler config proc failed, config param 0x%lx, "
"device num: %d, ret 0x%X", module, device_num, rt_ret);
REPORT_CALL_ERROR("E19999", "Runtime profiler config proc failed, config param 0x%lx, "
"device num %d, ret 0x%X", module, device_num, rt_ret);
return FAILED;
}
uint64_t execute_model_mask = module & PROF_MODEL_EXECUTE_MASK;
@@ -790,7 +872,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::Profilin
int32_t logic_device_id = 0;
rtError_t rt_ret = rtGetDevice(&logic_device_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "Runtime get logic_device_id failed, current logic_device_id:%d", logic_device_id);
GELOGE(rt_ret, "[Get][LogicDeviceId]Failed, ret 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Get logic device id failed, ret 0x%X", rt_ret);
}
GELOGI("Current logic_device_id:%d", logic_device_id);

@@ -805,7 +888,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::Profilin

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::PluginInit() {
if (prof_cb_.msprofReporterCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofReporterCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr");
return ge::PARAM_INVALID;
}
int32_t cb_ret = prof_cb_.msprofReporterCallback(
@@ -813,8 +897,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Plugin
static_cast<uint32_t>(MsprofReporterCallbackType::MSPROF_REPORTER_INIT),
nullptr, 0);
if (cb_ret != MSPROF_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Profiling reporter init failed, ret = %d.", cb_ret);
GELOGE(INTERNAL_ERROR, "[Init][ProfilingReporter] profiling init failed, ret = %d.", cb_ret);
REPORT_CALL_ERROR("E19999", "Profiling reporter init failed, ret 0x%X", cb_ret);
GELOGE(INTERNAL_ERROR, "[Init][ProfilingReporter]Failed, ret 0x%X", cb_ret);
return INTERNAL_ERROR;
}

@@ -823,8 +907,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Plugin
static_cast<uint32_t>(MsprofReporterCallbackType::MSPROF_REPORTER_DATA_MAX_LEN),
&reporter_max_len_, sizeof(uint32_t));
if (cb_ret != MSPROF_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Get profiling reporter data max len failed, ret = %d.", cb_ret);
GELOGE(INTERNAL_ERROR, "[Init][ProfilingReporter] Get profiling reporter data max len failed, ret = %d.", cb_ret);
REPORT_CALL_ERROR("E19999", "Get profiling reporter data max len failed, ret 0x%X", cb_ret);
GELOGE(INTERNAL_ERROR, "[Get][ProfilingDataMaxLen]Failed, ret 0x%X", cb_ret);
return INTERNAL_ERROR;
}

@@ -834,7 +918,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Plugin
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUnInit() const {
#ifdef DAVINCI_SUPPORT_PROFILING
if (prof_cb_.msprofReporterCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofReporterCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr");
return;
}
int32_t cb_ret = prof_cb_.msprofReporterCallback(
@@ -850,7 +935,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUn
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::CallMsprofReport(
ReporterData &reporter_data) const {
if (prof_cb_.msprofReporterCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofReporterCallback callback is nullptr.");
GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr");
REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr");
return ge::PARAM_INVALID;
}
return prof_cb_.msprofReporterCallback(
@@ -946,5 +1032,4 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpP
return;
}


} // namespace ge
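Almost every hunk in this file applies the same two-line shape: a GELOGE entry with a [Verb][Object] prefix for the developer log, paired with a REPORT_CALL_ERROR or REPORT_INNER_ERROR record for the user-facing error manager. A minimal self-contained sketch of that shape, with the two macros stubbed locally (the real definitions come from the GE logging and error-manager headers, and StartRuntimeProfiler is a hypothetical callee used only to drive the example):

```cpp
#include <cstdio>

// Local stand-ins for the real GE logging / error-report macros, stubbed so the
// sketch compiles on its own.
#define GELOGE(ret, fmt, ...) std::printf("[ERROR %d] " fmt "\n", static_cast<int>(ret), ##__VA_ARGS__)
#define REPORT_CALL_ERROR(code, fmt, ...) std::printf("[REPORT %s] " fmt "\n", (code), ##__VA_ARGS__)

constexpr int FAILED = 1;

// Hypothetical callee; returns a non-zero "runtime" error code.
static int StartRuntimeProfiler() { return 2; }

int StartProfilerSketch() {
  const int rt_ret = StartRuntimeProfiler();
  if (rt_ret != 0) {
    // The recurring shape in this commit: one developer-facing GELOGE line with a
    // [Verb][Object] prefix, plus one user-facing REPORT_* record with code E19999.
    GELOGE(FAILED, "[Start][Profiler]Failed, ret 0x%X", rt_ret);
    REPORT_CALL_ERROR("E19999", "Start runtime profiler failed, ret 0x%X", rt_ret);
    return FAILED;
  }
  return 0;
}

int main() { return StartProfilerSketch() == FAILED ? 1 : 0; }
```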

+ 2
- 0
ge/common/profiling/profiling_manager.h

@@ -81,7 +81,9 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
Status ProfModelUnsubscribe(void *model);
void StopProfiling();
bool ProfilingTrainingTraceOn() const { return is_training_trace_; }
// report model load profiling data flag, data contain task desc info, step info, model load fusion op info
bool ProfilingModelLoadOn() const { return is_load_profiling_; }
// report model execute profiling data flag, data contain model execute time info
bool ProfilingModelExecuteOn() const;
// is_execute_profiling_ only used by ge option and env
bool ProfilingOn() const { return is_load_profiling_ && is_execute_profiling_; }
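The comments added above describe three read-only switches. A standalone illustration of how they compose (this is not the real ProfilingManager, and ProfilingModelExecuteOn() is simplified here because the header above only declares it):

```cpp
#include <iostream>

// Illustrative mirror of the three accessors documented above.
class ProfilingFlagsSketch {
 public:
  // report model load profiling data: task desc info, step info, fusion op info
  bool ProfilingModelLoadOn() const { return is_load_profiling_; }
  // report model execute profiling data: model execute time info (simplified)
  bool ProfilingModelExecuteOn() const { return is_execute_profiling_; }
  // both switches on, driven only by ge option and env
  bool ProfilingOn() const { return is_load_profiling_ && is_execute_profiling_; }

  void Set(bool load, bool execute) {
    is_load_profiling_ = load;
    is_execute_profiling_ = execute;
  }

 private:
  bool is_load_profiling_ = false;
  bool is_execute_profiling_ = false;
};

int main() {
  ProfilingFlagsSketch flags;
  flags.Set(true, false);
  // Load-time data would be reported, execute-time data would not.
  std::cout << flags.ProfilingModelLoadOn() << flags.ProfilingModelExecuteOn()
            << flags.ProfilingOn() << std::endl;  // prints "100"
  return 0;
}
```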


+ 9
- 4
ge/common/properties_manager.cc

@@ -69,7 +69,8 @@ bool PropertiesManager::LoadFileContent(const std::string &file_path) {
std::ifstream fs(resolved_file_path, std::ifstream::in);

if (!fs.is_open()) {
GELOGE(PARAM_INVALID, "Open %s failed.", file_path.c_str());
GELOGE(PARAM_INVALID, "[Open][File]Failed, file path %s invalid", file_path.c_str());
REPORT_CALL_ERROR("E19999", "Open file failed, path %s invalid", file_path.c_str());
return false;
}

@@ -77,7 +78,8 @@ bool PropertiesManager::LoadFileContent(const std::string &file_path) {

while (getline(fs, line)) { // line not with \n
if (!ParseLine(line)) {
GELOGE(PARAM_INVALID, "Parse line failed. content is [%s].", line.c_str());
GELOGE(PARAM_INVALID, "[Parse][Line]Failed, content is %s", line.c_str());
REPORT_CALL_ERROR("E19999", "Parse line failed, content is %s", line.c_str());
fs.close();
return false;
}
@@ -100,15 +102,18 @@ bool PropertiesManager::ParseLine(const std::string &line) {
if (!temp.empty()) {
std::string::size_type pos = temp.find_first_of(delimiter);
if (pos == std::string::npos) {
GELOGE(PARAM_INVALID, "Incorrect line [%s], it must include [%s].Perhaps you use illegal chinese symbol",
GELOGE(PARAM_INVALID, "[Check][Param]Incorrect line %s, it must include %s",
line.c_str(), delimiter.c_str());
REPORT_CALL_ERROR("E19999", "Incorrect line %s, it must include %s",
line.c_str(), delimiter.c_str());
return false;
}

std::string map_key = Trim(temp.substr(0, pos));
std::string value = Trim(temp.substr(pos + 1));
if (map_key.empty() || value.empty()) {
GELOGE(PARAM_INVALID, "Map_key or value empty. %s", line.c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Map_key or value empty, line %s", line.c_str());
REPORT_CALL_ERROR("E19999", "Map_key or value empty, line %s", line.c_str());
return false;
}
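ParseLine above splits each non-empty line at the first occurrence of the delimiter, trims both sides, and rejects empty keys or values. A self-contained sketch of that flow, assuming an "=" delimiter and a sample key that are illustrative only (the real delimiter and Trim helper are defined elsewhere in properties_manager.cc):

```cpp
#include <iostream>
#include <string>

// Trim leading/trailing whitespace; analogous to the Trim helper used above.
static std::string Trim(const std::string &s) {
  const auto first = s.find_first_not_of(" \t\r\n");
  if (first == std::string::npos) {
    return "";
  }
  const auto last = s.find_last_not_of(" \t\r\n");
  return s.substr(first, last - first + 1);
}

// Returns false for lines without the delimiter or with an empty key/value,
// mirroring the error branches in PropertiesManager::ParseLine.
static bool ParseLineSketch(const std::string &line, std::string &key, std::string &value) {
  const std::string delimiter = "=";  // assumption for illustration
  const auto pos = line.find_first_of(delimiter);
  if (pos == std::string::npos) {
    return false;
  }
  key = Trim(line.substr(0, pos));
  value = Trim(line.substr(pos + 1));
  return !key.empty() && !value.empty();
}

int main() {
  std::string k, v;
  std::cout << ParseLineSketch("  dump_path = /tmp/dump  ", k, v) << " " << k << "=" << v << std::endl;
  std::cout << ParseLineSketch("no_delimiter_here", k, v) << std::endl;  // prints "0"
  return 0;
}
```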



+ 44
- 26
ge/common/util.cc

@@ -83,7 +83,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromBinaryFile(co
std::ifstream fs(real_path, std::ifstream::in | std::ifstream::binary);
if (!fs.is_open()) {
ErrorManager::GetInstance().ATCReportErrMessage("E19001", {"file", "errmsg"}, {file, "ifstream is_open failed"});
GELOGE(ge::FAILED, "Open real path[%s] failed.", file);
GELOGE(ge::FAILED, "[Open][File]Failed, file path %s", file);
return false;
}

@@ -96,7 +96,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromBinaryFile(co

if (!ret) {
ErrorManager::GetInstance().ATCReportErrMessage("E19005", {"file"}, {file});
GELOGE(ge::FAILED, "Parse file[%s] failed.", file);
GELOGE(ge::FAILED, "[Parse][File]Failed, file %s", file);
return ret;
}

@@ -155,7 +155,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(co

std::ifstream file(real_path.c_str(), std::ios::binary | std::ios::ate);
if (!file.is_open()) {
GELOGE(ge::FAILED, "Read file %s failed.", file_name);
GELOGE(ge::FAILED, "[Read][File]Failed, file %s", file_name);
REPORT_CALL_ERROR("E19999", "Read file %s failed", file_name);
return false;
}

@@ -182,7 +183,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(co

std::ifstream file(real_path.c_str(), std::ios::binary | std::ios::ate);
if (!file.is_open()) {
GELOGE(ge::FAILED, "Read file %s failed.", file_name);
GELOGE(ge::FAILED, "[Read][File]Failed, file %s", file_name);
REPORT_CALL_ERROR("E19999", "Read file %s failed", file_name);
return false;
}

@@ -250,7 +252,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string CurrentTimeInStr()
std::time_t now = std::time(nullptr);
std::tm *ptm = std::localtime(&now);
if (ptm == nullptr) {
GELOGE(ge::FAILED, "Localtime failed.");
GELOGE(ge::FAILED, "[Check][Param]Localtime incorrect, errmsg %s", strerror(errno));
REPORT_CALL_ERROR("E19999", "Localtime incorrect, errmsg %s", strerror(errno));
return "";
}

@@ -277,18 +280,16 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromText(const ch

if (!fs.is_open()) {
ErrorManager::GetInstance().ATCReportErrMessage("E19017", {"realpth", "protofile"}, {real_path, file});
GELOGE(ge::FAILED, "Fail to open proto file real path is '%s' when orginal file path is '%s'.", real_path.c_str(),
file);
GELOGE(ge::FAILED, "[Open][ProtoFile]Failed, real path %s, orginal file path %s",
real_path.c_str(), file);
return false;
}

google::protobuf::io::IstreamInputStream input(&fs);
bool ret = google::protobuf::TextFormat::Parse(&input, message);
GE_IF_BOOL_EXEC(!ret, ErrorManager::GetInstance().ATCReportErrMessage("E19018", {"protofile"}, {file});
GELOGE(ret,
"Parse file[%s] through [google::protobuf::TextFormat::Parse] failed, "
"please check whether the file is a valid protobuf format file.",
file));
GELOGE(ret, "[Parse][File]Through [google::protobuf::TextFormat::Parse] failed, "
"file %s", file));
fs.close();

return ret;
@@ -490,7 +491,8 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &str, const std::str
ret = regexec(&reg, str.c_str(), 0, NULL, 0);
if (ret) {
regerror(ret, &reg, ebuff, kMaxBuffSize);
GELOGE(ge::PARAM_INVALID, "regexec failed, reason: %s", ebuff);
GELOGE(ge::PARAM_INVALID, "[Rgexec][Param]Failed, reason %s", ebuff);
REPORT_CALL_ERROR("E19999", "Rgexec failed, reason %s", ebuff);
regfree(&reg);
return false;
}
@@ -518,35 +520,44 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &str, const std::str

FMK_FUNC_HOST_VISIBILITY bool IsValidFile(const char *file_path) {
if (file_path == nullptr) {
GELOGE(PARAM_INVALID, "Config path is null.");
GELOGE(PARAM_INVALID, "[Check][Param]Config path is null");
REPORT_INNER_ERROR("E19999", "Config path is null");
return false;
}
if (!CheckInputPathValid(file_path)) {
GELOGE(PARAM_INVALID, "Config path is invalid: %s", file_path);
GELOGE(PARAM_INVALID, "[Check][Param]Config path %s is invalid", file_path);
REPORT_CALL_ERROR("E19999", "Config path %s is invalid", file_path);
return false;
}
// Normalize the path
std::string resolved_file_path = RealPath(file_path);
if (resolved_file_path.empty()) {
GELOGE(PARAM_INVALID, "Invalid input file path [%s], make sure that the file path is correct.", file_path);
GELOGE(PARAM_INVALID, "[Check][Param]Invalid input file path %s, errmsg %s", file_path, strerror(errno));
REPORT_CALL_ERROR("E19999", "Invalid input file path %s, errmsg %s", file_path, strerror(errno));
return false;
}

mmStat_t stat = {0};
int32_t ret = mmStatGet(resolved_file_path.c_str(), &stat);
if (ret != EN_OK) {
GELOGE(PARAM_INVALID, "cannot get config file status, which path is %s, maybe not exist, return %d, errcode %d",
resolved_file_path.c_str(), ret, mmGetErrorCode());
GELOGE(PARAM_INVALID, "[Get][FileStatus]Failed, which path %s maybe not exist, "
"return %d, errcode %d", resolved_file_path.c_str(), ret, mmGetErrorCode());
REPORT_CALL_ERROR("E19999", "Get config file status failed, which path %s maybe not exist, "
"return %d, errcode %d", resolved_file_path.c_str(), ret, mmGetErrorCode());
return false;
}
if ((stat.st_mode & S_IFMT) != S_IFREG) {
GELOGE(PARAM_INVALID, "config file is not a common file, which path is %s, mode is %u", resolved_file_path.c_str(),
stat.st_mode);
GELOGE(PARAM_INVALID, "[Check][Param]Config file is not a common file, which path is %s, "
"mode is %u", resolved_file_path.c_str(), stat.st_mode);
REPORT_CALL_ERROR("E19999", "Config file is not a common file, which path is %s, "
"mode is %u", resolved_file_path.c_str(), stat.st_mode);
return false;
}
if (stat.st_size > kMaxConfigFileByte) {
GELOGE(PARAM_INVALID, "config file %s size[%ld] is larger than max config file Bytes[%u]",
resolved_file_path.c_str(), stat.st_size, kMaxConfigFileByte);
GELOGE(PARAM_INVALID, "[Check][Param]Config file %s size %ld is larger than max config "
"file Bytes %u", resolved_file_path.c_str(), stat.st_size, kMaxConfigFileByte);
REPORT_CALL_ERROR("E19999", "Config file %s size %ld is larger than max config file Bytes %u",
resolved_file_path.c_str(), stat.st_size, kMaxConfigFileByte);
return false;
}
return true;
@@ -554,29 +565,36 @@ FMK_FUNC_HOST_VISIBILITY bool IsValidFile(const char *file_path) {

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status CheckPath(const char *path, size_t length) {
if (path == nullptr) {
GELOGE(PARAM_INVALID, "Config path is invalid.");
GELOGE(PARAM_INVALID, "[Check][Param]Config path is invalid");
REPORT_CALL_ERROR("E19999", "Config path is invalid");
return PARAM_INVALID;
}

if (strlen(path) != length) {
GELOGE(PARAM_INVALID, "Path is invalid or length of config path is not equal to given length.");
GELOGE(PARAM_INVALID, "[Check][Param]Path %s is invalid or length %zu "
"not equal to given length %zu", path, strlen(path), length);
REPORT_CALL_ERROR("E19999", "Path %s is invalid or length %zu "
"not equal to given length %zu", path, strlen(path), length);
return PARAM_INVALID;
}

if (length == 0 || length > MMPA_MAX_PATH) {
GELOGE(PARAM_INVALID, "Length of config path is invalid.");
GELOGE(PARAM_INVALID, "[Check][Param]Length of config path %zu is invalid", length);
REPORT_INNER_ERROR("E19999", "Length of config path %zu is invalid", length);
return PARAM_INVALID;
}

INT32 is_dir = mmIsDir(path);
if (is_dir != EN_OK) {
GELOGE(PATH_INVALID, "Open directory %s failed, maybe it is not exit or not a dir. errmsg:%s",
GELOGE(PATH_INVALID, "[Open][Directory]Failed, directory path %s, errmsg %s",
path, strerror(errno));
REPORT_CALL_ERROR("E19999", "Open directory %s failed, errmsg %s", path, strerror(errno));
return PATH_INVALID;
}

if (mmAccess2(path, M_R_OK) != EN_OK) {
GELOGE(PATH_INVALID, "Read path[%s] failed, errmsg[%s]", path, strerror(errno));
GELOGE(PATH_INVALID, "[Read][Path]Failed, path %s, errmsg %s", path, strerror(errno));
REPORT_CALL_ERROR("E19999", "Read path %s failed, errmsg %s", path, strerror(errno));
return PATH_INVALID;
}
return SUCCESS;


+ 80
- 33
ge/engine_manager/dnnengine_manager.cc

@@ -71,13 +71,15 @@ Status DNNEngineManager::Initialize(const std::map<std::string, std::string> &op
std::vector<std::string> so_func{so_api_func};
Status status = plugin_mgr_.Load(path, so_func);
if (status != SUCCESS) {
GELOGE(status, "Load engine's so failed. LibPath is %s", path.c_str());
GELOGE(status, "[Load][EngineSo]Failed, lib path %s", path.c_str());
REPORT_CALL_ERROR("E19999", "Load engine so failed, lib path %s", path.c_str());
return status;
}

status = plugin_mgr_.InvokeAll<std::map<std::string, DNNEnginePtr> &>(so_api_func, engines_map_);
if (status != SUCCESS) {
GELOGE(status, "Get DNNEngineObjs failed.");
GELOGE(status, "[Get][DNNEngineObjs]Failed, so_api_func %s", so_api_func.c_str());
REPORT_CALL_ERROR("E19999", "Get DNNEngineObjs failed, so_api_func %s", so_api_func.c_str());
return status;
}

@@ -94,16 +96,21 @@ Status DNNEngineManager::Initialize(const std::map<std::string, std::string> &op

status = iter->second->Initialize(options);
if (status != SUCCESS) {
GELOGE(status, "Engine: %s initialize failed.", (iter->first).c_str());
GELOGE(status, "[Init][Engine]Failed, engine %s", (iter->first).c_str());
REPORT_CALL_ERROR("E19999", "Initialize engine %s failed", (iter->first).c_str());
return status;
}


// Check engines' attribute
DNNEngineAttribute attrs;
iter->second->GetAttributes(attrs);
if (attrs.runtime_type == RuntimeType::DEVICE) {
if ((attrs.mem_type.size()) != 1 || (attrs.mem_type[0] != GE_ENGINE_ATTR_MEM_TYPE_HBM)) {
GELOGE(GE_ENG_MEMTYPE_ERROR, "Engine: %s in aicore, but the memory type is not HBM", (iter->first).c_str());
GELOGE(GE_ENG_MEMTYPE_ERROR, "[Check][Param]Engine %s in aicore, but the memory type is "
"not HBM, mem_type_size %lu", (iter->first).c_str(), attrs.mem_type.size());
REPORT_INNER_ERROR("E19999", "Engine %s in aicore, but the memory type is not HBM, "
"mem_type_size %lu", (iter->first).c_str(), attrs.mem_type.size());
return GE_ENG_MEMTYPE_ERROR;
}
}
@@ -111,13 +118,13 @@ Status DNNEngineManager::Initialize(const std::map<std::string, std::string> &op

status = ParserJsonFile();
if (status != SUCCESS) {
GELOGE(status, "parse json file failed");
GELOGE(status, "[Parse][JsonFile]Failed");
return status;
}

status = CheckJsonFile();
if (status != SUCCESS) {
GELOGE(status, "check json file failed");
GELOGE(status, "[Check][JsonFile]Failed");
return status;
}

@@ -138,7 +145,8 @@ Status DNNEngineManager::Finalize() {
GELOGI("DNNEngine name: %s.", (iter->first).c_str());
Status status = iter->second->Finalize();
if (status != SUCCESS) {
GELOGE(status, "Engine finalize failed.");
GELOGE(status, "[Finalize][Engine]Failed, engine %s", (iter->first).c_str());
REPORT_CALL_ERROR("E19999", "Finalize engine %s failed", (iter->first).c_str());
return status;
}
}
@@ -188,7 +196,8 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
// Use the OpsKernelManager in GELib to get the opInfos for this opCode
std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
if ((instance_ptr == nullptr) || (!instance_ptr->InitFlag())) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GetDNNEngineName failed.");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][DNNEngineName]Failed, gelib not init before");
REPORT_INNER_ERROR("E19999", "Get DNNEngineName failed, gelib not init before");
return "";
}
OpsKernelManager &ops_kernel_manager = instance_ptr->OpsKernelManagerObj();
@@ -234,8 +243,9 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
ErrorManager::GetInstance().ATCReportErrMessage("E13001", {"kernelname", "optype", "opname"},
{kernel_name, op_desc->GetType(), op_desc->GetName()});
GELOGE(FAILED,
"The custom operator registered by the user does not support the logic function delivered by this "
"network. Check support failed, kernel_name is %s, op type is %s, op name is %s",
"[Check][Param]The custom operator registered by the user does not support "
"the logic function delivered by this network, kernel_name %s, op type %s, "
"op name %s",
kernel_name.c_str(), op_desc->GetType().c_str(), op_desc->GetName().c_str());
std::string error_info = "The custom operator registered by the user does not support the logic function"
"delivered by this network";
@@ -262,7 +272,8 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
reason += it.first + ":" + it.second + ";";
ErrorManager::GetInstance().ATCReportErrMessage(
"E13002", {"optype", "opskernel", "reason"}, {op_desc->GetType(), it.first, it.second});
GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED, "GetDNNEngineName:Op type %s of ops kernel %s is unsupported, reason:%s",
GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED, "[Check][OpSupported]Op type %s of ops kernel %s "
"is unsupported, reason %s",
op_desc->GetType().c_str(), it.first.c_str(), it.second.c_str());
}

@@ -273,7 +284,8 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {

ErrorManager::GetInstance().ATCReportErrMessage(
"E13003", {"opname", "optype"}, {op_desc->GetName(), op_desc->GetType()});
GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED, "Can't find any supported ops kernel and engine of %s, type is %s",
GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED, "[Get][DNNEngineName]Can't find any supported ops kernel "
"and engine of %s, type is %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return "";
}
@@ -289,8 +301,10 @@ std::string DNNEngineManager::GetHostCpuEngineName(const std::vector<OpInfo> &op
return kHostCpuEngineName;
}
}
GELOGE(FAILED, "DNNEngineManager: HostCpuEngine not support [%s, %s].",
GELOGE(FAILED, "[Get][HostCpuEngineName]Failed, HostCpuEngine not support [%s, %s]",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
REPORT_INNER_ERROR("E19999", "Get HostCpuEngineName failed, HostCpuEngine not support [%s, %s]",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return "";
}

@@ -304,7 +318,8 @@ Status DNNEngineManager::ParserJsonFile() {
nlohmann::json scheduler_json_file;
Status status = ReadJsonFile(path, &scheduler_json_file);
if (status != SUCCESS) {
GELOGE(FAILED, "Read scheduler json file failed and the file path is %s", path.c_str());
GELOGE(FAILED, "[Read][JsonFile]Failed, file %s", path.c_str());
REPORT_CALL_ERROR("E19999", "Read json file %s failed", path.c_str());
return FAILED;
}
if (scheduler_json_file.is_null()) {
@@ -316,11 +331,15 @@ Status DNNEngineManager::ParserJsonFile() {
try {
nlohmann::json scheduler_utils_json = scheduler_json_file[kSchedulerUnits];
if (scheduler_utils_json.is_null()) {
GELOGE(FAILED, "The message of scheduler units is not found");
GELOGE(FAILED, "[Check[Param]Find scheduler units failed, the message is null, file %s", path.c_str());
REPORT_INNER_ERROR("E19999", "Find scheduler units failed, the message is null, file %s", path.c_str());
return FAILED;
}
if (!scheduler_utils_json.is_array()) {
GELOGE(FAILED, "The message of kSchedulerUnits is not array and the file path is %s", json_file_path.c_str());
GELOGE(FAILED, "[Check][Param]The message of kSchedulerUnits is not array and "
"the file path is %s", path.c_str());
REPORT_INNER_ERROR("E19999", "The message of kSchedulerUnits is not array and "
"the file path is %s", path.c_str());
return FAILED;
}
auto size = scheduler_json_file[kSchedulerUnits].size();
@@ -329,19 +348,23 @@ Status DNNEngineManager::ParserJsonFile() {
std::map<std::string, EngineConfPtr> engine_conf_map;
nlohmann::json engines_json_map = scheduler_utils_json[i][kCalEngines];
if (engines_json_map.is_null()) {
GELOGE(FAILED, "The message of cal_engines is not found");
GELOGE(FAILED, "[Check][Param]The message of cal_engines is null, file %s", path.c_str());
REPORT_INNER_ERROR("E19999", "The message of cal_engines is null, file %s", path.c_str());
return FAILED;
}
std::string scheduler_id_temp = scheduler_utils_json[i][kId];
if (!scheduler_id_temp.empty()) {
scheduler_conf.id = scheduler_id_temp;
} else {
GELOGE(FAILED, "Scheduler ID is null");
GELOGE(FAILED, "[Check][Param]Scheduler ID is null, file %s", path.c_str());
REPORT_INNER_ERROR("E19999", "Scheduler ID is null, file %s", path.c_str());
return FAILED;
}
status = ParserEngineMessage(engines_json_map, scheduler_id_temp, engine_conf_map);
if (status != SUCCESS) {
GELOGE(FAILED, "Parser engines messages failed");
GELOGE(FAILED, "[Parse][EngineMessage]Failed, scheduler_id_temp %s", scheduler_id_temp.c_str());
REPORT_CALL_ERROR("E19999", "Parse engine message failed, scheduler_id_temp %s",
scheduler_id_temp.c_str());
return FAILED;
}
scheduler_conf.name = scheduler_utils_json[i][kName];
@@ -349,13 +372,17 @@ Status DNNEngineManager::ParserJsonFile() {
scheduler_conf.cal_engines = engine_conf_map;
auto it = schedulers_.find(scheduler_id_temp);
if (it != schedulers_.end()) {
GELOGE(FAILED, "There are the same scheduler ts %s in the json file", scheduler_id_temp.c_str());
GELOGE(FAILED, "[Check][Param]There are the same scheduler ts %s in the json file",
scheduler_id_temp.c_str());
REPORT_INNER_ERROR("E19999", "[Check][Param]There are the same scheduler ts %s "
"in the json file", scheduler_id_temp.c_str());
return FAILED;
}
schedulers_.emplace(scheduler_id_temp, scheduler_conf);
}
} catch (const nlohmann::detail::type_error &e) {
GELOGE(FAILED, "Parser json file failed");
GELOGE(FAILED, "[Parse][JsonFile]Failed, file %s, reason %s", path.c_str(), e.what());
REPORT_CALL_ERROR("E19999", "Parse json file %s failed, reason %s", path.c_str(), e.what());
return FAILED;
}

@@ -367,7 +394,8 @@ Status DNNEngineManager::ParserEngineMessage(const json engines_json, const std:
std::map<std::string, EngineConfPtr> &engines) {
GELOGI("Begin to parser engine massage");
if (engines_json.is_null()) {
GELOGE(FAILED, "The message of cal_engines is null");
GELOGE(FAILED, "[Check][Param]The message of cal_engines is null");
REPORT_INNER_ERROR("E19999", "The message of cal_engines is null");
return FAILED;
}
try {
@@ -382,7 +410,8 @@ Status DNNEngineManager::ParserEngineMessage(const json engines_json, const std:
if (!engine_id.empty()) {
engine_conf_ptr->id = engine_id;
} else {
GELOGE(FAILED, "engineID is null");
GELOGE(FAILED, "[Check][Param]Engine ID is null");
REPORT_INNER_ERROR("E19999", "Engine ID is null");
return FAILED;
}
if (engines_elems.find(kName) != engines_elems.end()) {
@@ -404,17 +433,22 @@ Status DNNEngineManager::ParserEngineMessage(const json engines_json, const std:
engine_conf_ptr->scheduler_id = scheduler_mark;
auto it = engines.find(engine_id);
if (it != engines.end()) {
GELOGE(FAILED, "There are the same engine %s message in the json file", engine_id.c_str());
GELOGE(FAILED, "[Check][Param]There are the same engine %s message in the json file",
engine_id.c_str());
REPORT_INNER_ERROR("E19999", "There are the same engine %s message in the json file",
engine_id.c_str());
return FAILED;
}
engines.emplace(engine_id, engine_conf_ptr);
}
} else {
GELOGE(FAILED, "The message of cal_engines is not array in the json file");
GELOGE(FAILED, "[Check][Param]The message of cal_engines is not array in the json file");
REPORT_INNER_ERROR("E19999", "The message of cal_engines is not array in the json file");
return FAILED;
}
} catch (const json::exception &e) {
GELOGE(FAILED, "construct json content failed");
GELOGE(FAILED, "[Construct][JsonContent]Failed, reason %s", e.what());
REPORT_INNER_ERROR("E19999", "Construct json content failed, reason %s", e.what());
return FAILED;
}
GELOGI("Parser engine massage success");
@@ -424,18 +458,23 @@ Status DNNEngineManager::ParserEngineMessage(const json engines_json, const std:
Status DNNEngineManager::ReadJsonFile(const std::string &file_path, JsonHandle handle) {
GELOGD("Begin to read json file");
if (file_path.empty()) {
GELOGE(FAILED, "Json path %s is not valid", file_path.c_str());
GELOGE(FAILED, "[Check][Param]Json path is empty");
REPORT_INNER_ERROR("E19999", "Json path is empty");
return FAILED;
}
nlohmann::json *json_file = reinterpret_cast<nlohmann::json *>(handle);
if (json_file == nullptr) {
GELOGE(FAILED, "JsonFile is nullptr");
GELOGE(FAILED, "[Check][Param]Json file is nullptr");
REPORT_CALL_ERROR("E19999", "Json file is nullptr");
return FAILED;
}
const char *file = file_path.data();
if ((mmAccess2(file, M_F_OK)) != EN_OK) {
if (engines_map_.size() != 0) {
GELOGE(FAILED, "The json file %s is not exist, errmsg:%s", file_path.c_str(), strerror(errno));
GELOGE(FAILED, "[Check][Param]The json file %s not exists, err %s",
file_path.c_str(), strerror(errno));
REPORT_CALL_ERROR("E19999", "Json file %s not exists, err %s",
file_path.c_str(), strerror(errno));
return FAILED;
} else {
GELOGW("The json file %s is not needed.", file_path.c_str());
@@ -445,14 +484,16 @@ Status DNNEngineManager::ReadJsonFile(const std::string &file_path, JsonHandle h

std::ifstream ifs(file_path);
if (!ifs.is_open()) {
GELOGE(FAILED, "Open json file %s failed", file_path.c_str());
GELOGE(FAILED, "[Open][JsonFile]Failed, file %s", file_path.c_str());
REPORT_CALL_ERROR("E19999", "Open json file %s failed", file_path.c_str());
return FAILED;
}

try {
ifs >> *json_file;
} catch (const json::exception &e) {
GELOGE(FAILED, "Read json file failed");
GELOGE(FAILED, "[Read][JsonFile]Failed, reason %s", e.what());
REPORT_CALL_ERROR("E19999", "Read json file failed, reason %s", e.what());
ifs.close();
return FAILED;
}
@@ -474,11 +515,17 @@ Status DNNEngineManager::CheckJsonFile() {
}
}
if (count == 0) {
GELOGE(FAILED, "The engine message %s is not found in the json file", engine_name.c_str());
GELOGE(FAILED, "[Check][JsonFile]The engine message %s is not found in the json file",
engine_name.c_str());
REPORT_INNER_ERROR("E19999", "The engine message %s is not found in the json file",
engine_name.c_str());
return FAILED;
}
if (count > 1) {
GELOGE(FAILED, "The same engine message %s is existed in the json file", engine_name.c_str());
GELOGE(FAILED, "[Check][JsonFile]The same engine message %s exists in the json file",
engine_name.c_str());
REPORT_INNER_ERROR("E19999", "The same engine message %s exists in the json file",
engine_name.c_str());
return FAILED;
}
}
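ReadJsonFile and ParserJsonFile above follow the usual nlohmann::json flow: stream the file into a json object and treat nlohmann::json::exception (or the narrower type_error) as a parse failure, reporting e.what(). A compilable sketch of that flow, assuming nlohmann/json.hpp is available on the include path and using a placeholder file name:

```cpp
#include <fstream>
#include <iostream>
#include <string>

#include <nlohmann/json.hpp>

// Mirrors the read-and-parse flow in DNNEngineManager::ReadJsonFile:
// open the file, stream it into a json object, report e.what() on failure.
static bool ReadJsonFileSketch(const std::string &file_path, nlohmann::json &out) {
  std::ifstream ifs(file_path);
  if (!ifs.is_open()) {
    std::cerr << "open json file failed: " << file_path << std::endl;
    return false;
  }
  try {
    ifs >> out;
  } catch (const nlohmann::json::exception &e) {
    std::cerr << "read json file failed, reason: " << e.what() << std::endl;
    return false;
  }
  return true;
}

int main() {
  nlohmann::json cfg;
  // "engine_conf.json" is a placeholder path for illustration only.
  if (ReadJsonFileSketch("engine_conf.json", cfg) && cfg.is_array()) {
    std::cout << "entries: " << cfg.size() << std::endl;
  }
  return 0;
}
```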


+ 7
- 0
ge/executor/CMakeLists.txt

@@ -19,6 +19,7 @@ set(SRC_LIST
"../common/dump/exception_dumper.cc"
"../common/dump/dump_manager.cc"
"../common/dump/dump_op.cc"
"../common/dump/dump_server.cc"
"../common/dump/opdebug_register.cc"
"../common/profiling/ge_profiling.cc"
"../graph/load/graph_loader.cc"
@@ -27,6 +28,8 @@ set(SRC_LIST
"../graph/manager/graph_var_manager.cc"
"../graph/manager/graph_mem_allocator.cc"
"../graph/manager/graph_caching_allocator.cc"
"../graph/manager/session_scope_mem_allocator.cc"
"../graph/manager/graph_mem_manager.cc"
"../graph/manager/trans_var_data_utils.cc"
"../graph/manager/util/debug.cc"
"../graph/manager/rdma_pool_allocator.cc"
@@ -110,6 +113,8 @@ set(SRC_LIST
"../hybrid/node_executor/controlop/control_op_executor.cc"
"../hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc"
"../hybrid/node_executor/rts/rts_node_executor.cc"
"../hybrid/node_executor/rts/rts_node_task.cc"
"../hybrid/node_executor/rts/rts_task_factory.cc"
"../hybrid/node_executor/node_executor.cc"
"../hybrid/node_executor/task_context.cc"
"../hybrid/hybrid_davinci_model.cc"
@@ -199,6 +204,7 @@ target_include_directories(ge_executor SYSTEM PRIVATE
${GE_CODE_DIR}/../inc/cce
#### blue zone ####
${GE_CODE_DIR}/third_party/fwkacllib/inc
${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain
)

target_link_libraries(ge_executor PRIVATE
@@ -245,6 +251,7 @@ target_include_directories(ge_executor_shared PRIVATE
${GE_CODE_DIR}/../inc/cce
#### blue zone ####
${GE_CODE_DIR}/third_party/fwkacllib/inc
${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain
)

target_link_options(ge_executor_shared PRIVATE


+ 18
- 1
ge/executor/ge_executor.cc

@@ -26,7 +26,7 @@
#include "graph/execute/graph_execute.h"
#include "graph/load/graph_loader.h"
#include "graph/load/model_manager/model_manager.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_mem_manager.h"
#include "single_op/single_op_manager.h"
#include "graph/load/model_manager/davinci_model.h"
#include "opskernel_manager/ops_kernel_builder_manager.h"
@@ -731,6 +731,23 @@ Status GeExecutor::GetAippType(uint32_t model_id, uint32_t index, InputAippType
return SUCCESS;
}

Status GeExecutor::GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name,
std::string &attr_value) {
GELOGI("Begin to get op attr.");
if (!isInit_) {
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Init][GeExecutor]Ge executor not inited yet!");
REPORT_INNER_ERROR("E19999", "Ge executor not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetOpAttr(model_id, op_name, attr_name, attr_value);
if (ret != SUCCESS) {
GELOGE(ret, "[Get][OpAttr]Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str());
REPORT_CALL_ERROR("E19999", "Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str());
return ret;
}
return SUCCESS;
}

Status GeExecutor::GetModelAttr(uint32_t model_id, std::vector<std::string> &dynamic_output_shape_info) {
if (!isInit_) {
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");


+ 0
- 3
ge/ge_inference.mk

@@ -122,12 +122,10 @@ OMG_HOST_SRC_FILES := \
graph/passes/dimension_adjust_pass.cc \
graph/passes/get_original_format_pass.cc \
graph/passes/shape_operate_op_remove_pass.cc \
graph/passes/unused_op_remove_pass.cc \
graph/passes/assert_pass.cc \
graph/passes/dropout_pass.cc \
graph/passes/infershape_pass.cc \
graph/passes/unused_const_pass.cc \
graph/passes/isolated_op_remove_pass.cc \
graph/passes/permute_pass.cc \
graph/passes/ctrl_edge_transfer_pass.cc \
graph/passes/end_of_sequence_add_control_pass.cc \
@@ -209,7 +207,6 @@ OMG_HOST_SRC_FILES := \
graph/passes/switch_logic_remove_pass.cc \
graph/passes/switch_data_edges_bypass.cc \
graph/passes/merge_pass.cc \
graph/passes/variable_format_pass.cc \
graph/passes/variable_op_pass.cc \
graph/passes/cast_remove_pass.cc \
graph/passes/transpose_transdata_pass.cc \


+ 2
- 1
ge/ge_local_engine/engine/ge_local_engine.cc

@@ -35,7 +35,8 @@ Status GeLocalEngine::Initialize(const std::map<string, string> &options) {
if (ops_kernel_store_ == nullptr) {
ops_kernel_store_ = MakeShared<GeLocalOpsKernelInfoStore>();
if (ops_kernel_store_ == nullptr) {
GELOGE(FAILED, "Make GeLocalOpsKernelInfoStore failed.");
REPORT_CALL_ERROR("E19999", "create GeLocalOpsKernelInfoStore failed.");
GELOGE(FAILED, "[Call][MakeShared] Make GeLocalOpsKernelInfoStore failed.");
return FAILED;
}
}


+ 20
- 13
ge/ge_local_engine/engine/host_cpu_engine.cc

@@ -43,7 +43,7 @@ namespace {
} \
auto tensor = TensorAdapter::AsTensor(*ge_tensor); \
auto tensor_name = op_desc->GetOutputNameByIndex(i); \
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "Failed to get output name. node = %s, index = %zu", \
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "[Get][OutputName] failed. node = %s, index = %zu", \
op_desc->GetName().c_str(), i); \
named_outputs.emplace(tensor_name, tensor); \
break; \
@@ -61,7 +61,8 @@ Status GetDataNumber(const GeTensorDesc &out_desc, uint64_t &data_num) {
if (out_desc.GetShape().IsUnknownShape()) {
std::vector<std::pair<int64_t, int64_t>> range;
if (out_desc.GetShapeRange(range) != GRAPH_SUCCESS) {
GELOGE(INTERNAL_ERROR, "Get shape range failed.");
REPORT_CALL_ERROR("E19999", "GetShapeRange failed.");
GELOGE(INTERNAL_ERROR, "[Get][ShapeRange] failed.");
return INTERNAL_ERROR;
}
int64_t max_range_size = 1;
@@ -72,7 +73,8 @@ Status GetDataNumber(const GeTensorDesc &out_desc, uint64_t &data_num) {
num_size = max_range_size;
}
if (num_size < 0) {
GELOGE(INTERNAL_ERROR, "Get negative size, num_size=%ld.", num_size);
REPORT_INNER_ERROR("E19999", "Get negative size, num_size=%ld.", num_size);
GELOGE(INTERNAL_ERROR, "[Check][Param] Get negative size, num_size=%ld.", num_size);
return INTERNAL_ERROR;
}
data_num = static_cast<uint64_t>(num_size);
@@ -137,10 +139,10 @@ Status HostCpuEngine::PrepareInputs(const ge::ConstOpDescPtr &op_desc,
map<std::string, const Tensor> &named_inputs) {
auto num_inputs = op_desc->GetInputsSize();
if (num_inputs != inputs.size()) {
GELOGE(PARAM_INVALID,
"Mismatching input sizes. op_desc has %zu input(s), but given %zu",
num_inputs,
inputs.size());
REPORT_INNER_ERROR("E19999", "Mismatching input sizes. op_desc:%s(%s) has %zu input(s), but given %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), num_inputs, inputs.size());
GELOGE(PARAM_INVALID, "[Check][Param] Mismatching input sizes. op_desc:%s(%s) has %zu input(s), but given %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), num_inputs, inputs.size());
return PARAM_INVALID;
}

@@ -149,8 +151,8 @@ Status HostCpuEngine::PrepareInputs(const ge::ConstOpDescPtr &op_desc,
GE_CHECK_NOTNULL(ge_tensor);
auto tensor = TensorAdapter::AsTensor(*ge_tensor);
auto tensor_name = op_desc->GetInputNameByIndex(i);
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(),
"Failed to get input name. node = %s, index = %zu", op_desc->GetName().c_str(), i);
GE_RETURN_WITH_LOG_IF_TRUE(tensor_name.empty(), "[Get][InputName] failed. node = %s, index = %zu",
op_desc->GetName().c_str(), i);
GELOGD("Successfully inserted input tensor. node = %s, index = %zu, input name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
named_inputs.emplace(tensor_name, tensor);
@@ -173,7 +175,7 @@ Status HostCpuEngine::PrepareOutputs(const ge::ConstOpDescPtr &op_desc,
uint64_t data_num = 0;
if (need_create_flag) {
if (GetDataNumber(out_desc, data_num) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "node:%s, get size for output %zu failed", op_desc->GetName().c_str(), i);
GELOGE(INTERNAL_ERROR, "[Get][Number] node:%s get size for output %zu failed", op_desc->GetName().c_str(), i);
return INTERNAL_ERROR;
}
}
@@ -234,12 +236,16 @@ Status HostCpuEngine::Run(NodePtr &node, const vector<ConstGeTensorPtr> &inputs,
for (size_t i = 0; i < op_desc->GetOutputsSize(); i++) {
auto tensor_name = op_desc->GetOutputNameByIndex(i);
if (tensor_name.empty()) {
GELOGE(INTERNAL_ERROR, "Failed to get output name. node = %s, index = %zu", op_desc->GetName().c_str(), i);
REPORT_INNER_ERROR("E19999", "GetOutputNameByIndex failed, node = %s, index = %zu",
op_desc->GetName().c_str(), i);
GELOGE(INTERNAL_ERROR, "[Get][OutputName] failed. node = %s, index = %zu", op_desc->GetName().c_str(), i);
return INTERNAL_ERROR;
}
auto iter = named_outputs.find(tensor_name);
if (iter == named_outputs.end()) {
GELOGE(INTERNAL_ERROR, "Failed to get output tensor. node = %s, index = %zu, tensor_name = %s",
REPORT_INNER_ERROR("E19999", "get output tensor failed, node = %s, index = %zu, tensor_name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
GELOGE(INTERNAL_ERROR, "[Get][OutputTensor] failed. node = %s, index = %zu, tensor_name = %s",
op_desc->GetName().c_str(), i, tensor_name.c_str());
return INTERNAL_ERROR;
}
@@ -328,7 +334,8 @@ Status HostCpuEngine::LoadLib(const std::string &lib_path) {
if (handle == nullptr) {
const char *error = mmDlerror();
error = (error == nullptr) ? "" : error;
GELOGE(INTERNAL_ERROR, "Failed to invoke dlopen. path = %s, error = %s", lib_path.c_str(), error);
REPORT_CALL_ERROR("E19999", "mmDlopen failed, path = %s, error = %s", lib_path.c_str(), error);
GELOGE(INTERNAL_ERROR, "[Invoke][DlOpen] failed. path = %s, error = %s", lib_path.c_str(), error);
return INTERNAL_ERROR;
}



+ 22
- 10
ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc

@@ -52,7 +52,8 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {
GELOGD("[%s] CalcOpRunningParam In.", ge_node.GetName().c_str());
OpDescPtr op_desc = ge_node.GetOpDesc();
if (op_desc == nullptr) {
GELOGE(FAILED, "CalcOpRunningParam failed, as op desc is null");
REPORT_CALL_ERROR("E19999", "param ge_node has no opdesc, check invalid.");
GELOGE(FAILED, "[Get][OpDesc] CalcOpRunningParam failed, as op desc is null");
return FAILED;
}

@@ -97,15 +98,21 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {
}

if (graph_status != GRAPH_SUCCESS) {
GELOGE(FAILED, "Calc op[%s:%s] out[%zu] mem size failed, format=%s, data_type=%s, error=%u.", node_name.c_str(),
node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
REPORT_CALL_ERROR("E19999", "calc op[%s:%s] out[%zu] mem size failed, format=%s, data_type=%s, error=%u.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), graph_status);
GELOGE(FAILED, "[Calc][MemSize] for op[%s:%s] out[%zu] failed, format=%s, data_type=%s, error=%u.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), graph_status);
return FAILED;
}

if (output_mem_size < 0) {
GELOGE(FAILED,
"Calc op[%s:%s] out[%zu] mem size is negative(not support),"
REPORT_INNER_ERROR("E19999", "Calc op[%s:%s] out[%zu] mem size is negative(not support),"
" format=%s, data_type=%s, mem_size=%ld.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), output_mem_size);
GELOGE(FAILED, "[Calc][MemSize] op[%s:%s] out[%zu] mem size is negative(not support),"
" format=%s, data_type=%s, mem_size=%ld.",
node_name.c_str(), node_type.c_str(), i, TypeUtils::FormatToSerialString(format).c_str(),
TypeUtils::DataTypeToSerialString(data_type).c_str(), output_mem_size);
@@ -133,17 +140,20 @@ Status GeLocalOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) {

Status GeLocalOpsKernelBuilder::CalcConstantStrMemSize(const OpDescPtr &op_desc, int64_t &mem_size) {
if (op_desc == nullptr) {
GELOGE(FAILED, "CalcConstantStrMemSize failed, as op desc is null");
REPORT_INNER_ERROR("E19999", "param op_desc is nullptr, check invalid");
GELOGE(FAILED, "[Check][Param] CalcConstantStrMemSize failed, as op desc is null");
return FAILED;
}
ConstGeTensorPtr value = MakeShared<const GeTensor>();
if (value == nullptr) {
GELOGE(FAILED, "make shared ConstGeTensor exception.");
REPORT_CALL_ERROR("E19999", "make shared ConstGeTensor exception.");
GELOGE(FAILED, "[Create][GeTensor] make shared ConstGeTensor exception.");
return FAILED;
}
// Constant op attr name is "value"
if (!AttrUtils::GetTensor(op_desc, kConstantOpAttrName, value)) {
GELOGE(FAILED, "Get Constant op attr value failed");
REPORT_CALL_ERROR("E19999", "get op:%s attr value failed", op_desc->GetName().c_str());
GELOGE(FAILED, "[Get][Value] of Constant op attr failed");
return FAILED;
}
mem_size = static_cast<int64_t>(value->GetData().size());
@@ -165,13 +175,15 @@ Status GeLocalOpsKernelBuilder::GenerateTask(const Node &node, RunContext &conte

auto op = OpFactory::Instance().CreateOp(node, context);
if (op == nullptr) {
GELOGE(FAILED, "CreateOp for node:%s(%s) failed.", name.c_str(), type.c_str());
REPORT_CALL_ERROR("E19999", "create op for node:%s(%s) failed.", name.c_str(), type.c_str());
GELOGE(FAILED, "[Create][Op] for node:%s(%s) failed.", name.c_str(), type.c_str());
return FAILED;
}

Status ret = op->Run();
if (ret != SUCCESS) {
GELOGE(ret, "Node:%s(%s) op run failed.", name.c_str(), type.c_str());
REPORT_CALL_ERROR("E19999", "Node:%s(%s) op run failed.", name.c_str(), type.c_str());
GELOGE(ret, "[Call][Run] for Node:%s(%s) op failed.", name.c_str(), type.c_str());
return ret;
}
GELOGD("Ge local generate task for node:%s(%s) end, tasks.size()=%zu.", name.c_str(), type.c_str(), tasks.size());


+ 2
- 1
ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc

@@ -24,7 +24,8 @@ namespace ge_local {
GeDeletedOp::GeDeletedOp(const Node &node, RunContext &run_context) : Op(node, run_context) {}

Status GeDeletedOp::Run() {
GELOGE(FAILED, "Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
REPORT_INNER_ERROR("E19999", "Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
GELOGE(FAILED, "[Delelte][Node] Node:%s type is %s, should be deleted by ge.", name_.c_str(), type_.c_str());
// Do nothing
return FAILED;
}


+ 4
- 2
ge/ge_local_engine/ops_kernel_store/op/op_factory.cc

@@ -31,8 +31,10 @@ std::shared_ptr<Op> OpFactory::CreateOp(const Node &node, RunContext &run_contex
if (iter != op_creator_map_.end()) {
return iter->second(node, run_context);
}

GELOGE(FAILED, "Not supported OP, type = %s, name = %s", node.GetType().c_str(), node.GetName().c_str());
REPORT_INNER_ERROR("E19999", "Not supported OP, type = %s, name = %s",
node.GetType().c_str(), node.GetName().c_str());
GELOGE(FAILED, "[Check][Param] Not supported OP, type = %s, name = %s",
node.GetType().c_str(), node.GetName().c_str());
return nullptr;
}
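OpFactory::CreateOp above is a plain creator-map factory: look up the node type, invoke the registered creator, and fail loudly when the type is missing. A self-contained sketch of the same shape, with all type names invented for illustration:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct OpSketch {
  virtual ~OpSketch() = default;
  virtual void Run() const = 0;
};

struct AddOpSketch : OpSketch {
  void Run() const override { std::cout << "run Add" << std::endl; }
};

// Creator map keyed by op type, analogous to op_creator_map_ above.
using Creator = std::function<std::shared_ptr<OpSketch>()>;

static std::shared_ptr<OpSketch> CreateOp(const std::map<std::string, Creator> &creators,
                                          const std::string &type) {
  const auto iter = creators.find(type);
  if (iter != creators.end()) {
    return iter->second();
  }
  std::cerr << "Not supported OP, type = " << type << std::endl;  // mirrors the error branch above
  return nullptr;
}

int main() {
  std::map<std::string, Creator> creators{{"Add", [] { return std::make_shared<AddOpSketch>(); }}};
  auto op = CreateOp(creators, "Add");
  if (op != nullptr) {
    op->Run();
  }
  return CreateOp(creators, "Mul") == nullptr ? 0 : 1;
}
```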



+ 0
- 3
ge/ge_runner.mk

@@ -187,7 +187,6 @@ LIBGE_LOCAL_SRC_FILES := \
graph/passes/identity_pass.cc \
graph/passes/ref_identity_delete_op_pass.cc \
graph/passes/infershape_pass.cc \
graph/passes/isolated_op_remove_pass.cc \
graph/passes/iterator_op_pass.cc \
graph/passes/link_gen_mask_nodes_pass.cc \
graph/passes/merge_pass.cc \
@@ -233,13 +232,11 @@ LIBGE_LOCAL_SRC_FILES := \
graph/passes/transop_without_reshape_fusion_pass.cc \
graph/passes/transpose_transdata_pass.cc \
graph/passes/unused_const_pass.cc \
graph/passes/unused_op_remove_pass.cc \
graph/passes/var_is_initialized_op_pass.cc \
graph/passes/parallel_concat_start_op_pass.cc \
graph/passes/cond_pass.cc \
graph/passes/cond_remove_pass.cc \
graph/passes/for_pass.cc \
graph/passes/variable_format_pass.cc \
graph/passes/variable_op_pass.cc \
graph/passes/variable_prepare_op_pass.cc \
graph/passes/variable_ref_delete_op_pass.cc \


+ 89
- 46
ge/generator/ge_generator.cc

@@ -94,7 +94,7 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "engine type",
"it only support default/AIcoreEngine/VectorEngine"});
GELOGE(FAILED, "[Check][EngineType]value:%d not support, "
GELOGE(FAILED, "[Check][Param] value:%d not support, "
"only support default/AIcoreEngine/VectorEngine now", static_cast<int>(engine_type));
return FAILED;
}
@@ -107,7 +107,8 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
// set op engine name and opkernelLib. when engine support
std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
if ((instance_ptr == nullptr) || (!instance_ptr->InitFlag())) {
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "CheckEngineType failed.");
REPORT_INNER_ERROR("E19999", "get gelib failed, as get instance failed or initflag failed.");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][GELib] CheckEngineType failed, as get gelib failed.");
return FAILED;
}
OpsKernelManager &ops_kernel_manager = instance_ptr->OpsKernelManagerObj();
@@ -115,7 +116,7 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
if (op_infos.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "optype", "it can not find"});
GELOGE(FAILED, "CheckEngineType: Can not get op info by op type %s", op_desc->GetType().c_str());
GELOGE(FAILED, "[Get][OpInfo] by op type %s failed.", op_desc->GetType().c_str());
return FAILED;
}
string kernel_name;
@@ -128,7 +129,8 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
if (kernel_name.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "engine name" + FmtToStr(op_engine_name), "it can not find"});
GELOGE(FAILED, "CheckEngineType:Can not find ops kernel, engine name: %s.", op_engine_name.c_str());
GELOGE(FAILED, "[Check][Param] Can not find ops kernel, engine name:%s. op:%s(%s)",
op_engine_name.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}
auto &kernel_map = ops_kernel_manager.GetAllOpsKernelInfoStores();
@@ -144,15 +146,14 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
} else {
ErrorManager::GetInstance().ATCReportErrMessage(
"E13002", {"optype", "opskernel", "reason"}, {op_desc->GetType(), kernel_name, unsupported_reason});
GELOGE(FAILED, "CheckEngineType: check support failed, Op type %s of ops kernel %s is unsupported, reason:%s",
GELOGE(FAILED, "[Call][CheckSupported] failed, Op type %s of ops kernel %s is unsupported, reason:%s",
op_desc->GetType().c_str(), kernel_name.c_str(), unsupported_reason.c_str());
return FAILED;
}
} else {
ErrorManager::GetInstance().ATCReportErrMessage(
"E13003", {"opname", "optype"}, {op_desc->GetName(), op_desc->GetType()});
GELOGE(FAILED,
"CheckEngineType:Can not find any supported ops kernel info store by kernel_name %s,"
GELOGE(FAILED, "[Check][Param] Can not find any supported ops kernel info store by kernel_name %s,"
"op type is %s, op name is %s",
kernel_name.c_str(), op_desc->GetType().c_str(), op_desc->GetName().c_str());
}
@@ -183,34 +184,47 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
string op_name = node->GetName() + "_in_" + std::to_string(index);
OpDescPtr data_op = MakeShared<ge::OpDesc>(op_name, op_type);
if (data_op == nullptr) {
REPORT_CALL_ERROR("E19999", "create OpDesc failed, name:%s", op_name.c_str());
GELOGE(FAILED, "[Create][OpDesc] failed, name:%s", op_name.c_str());
return FAILED;
}
if (is_const) {
ConstGeTensorPtr tensor_value;
if (!AttrUtils::GetTensor(tensor, ge::ATTR_NAME_WEIGHTS, tensor_value)) {
GELOGE(FAILED, "Get value failed, node name:%s.", tensor.GetName().c_str());
REPORT_CALL_ERROR("E19999", "get attr %s failed, tensor:%s.",
ge::ATTR_NAME_WEIGHTS.c_str(), tensor.GetName().c_str());
GELOGE(FAILED, "[Get][Attr] %s failed, tensor:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), tensor.GetName().c_str());
return FAILED;
}
if (!AttrUtils::SetTensor(data_op, ge::ATTR_NAME_WEIGHTS, tensor_value)) {
GELOGE(FAILED, "Set attr ATTR_NAME_WEIGHTS fail.");
REPORT_CALL_ERROR("E19999", "set attr %s failed, op:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), op_name.c_str());
GELOGE(FAILED, "[Set][Attr] %s failed, op:%s.", ge::ATTR_NAME_WEIGHTS.c_str(), op_name.c_str());
return FAILED;
}
}

(void)AttrUtils::SetBool(data_op, "_is_single_op", true);

GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][InputDesc]fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][OutputDesc]fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddInputDesc failed for node:%s", data_op->GetName().c_str());
return FAILED, "[Add][InputDesc] fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddOutputDesc failed for node:%s", data_op->GetName().c_str());
return FAILED, "[Add][OutputDesc] fail for node:%s", data_op->GetName().c_str());
if (attr && !is_const) {
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, data_index), return FAILED,
"[Set][Attr:%s]fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, data_index),
REPORT_CALL_ERROR("E19999", "set attr %s failed for node:%s",
ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
return FAILED,
"[Set][Attr:%s] fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
++data_index;
}

ge::NodePtr arg_node = graph->AddNode(data_op);
GE_CHK_BOOL_EXEC(arg_node != nullptr, return FAILED, "Insert Data node fail");
GE_CHK_BOOL_EXEC(arg_node != nullptr,
REPORT_CALL_ERROR("E19999", "add node:%s to graph:%s failed", data_op->GetName().c_str(),
graph->GetName().c_str());
return FAILED, "[Add][Node] Insert Data node:%s fail", data_op->GetName().c_str());

GE_CHK_STATUS(GraphUtils::AddEdge(arg_node->GetOutDataAnchor(0), node->GetInDataAnchor(index)),
"[Add][Edge]fail from node:%s to node:%s", data_op->GetName().c_str(), node->GetName().c_str());
@@ -221,6 +235,8 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
static Status AddOutputs(const ComputeGraphPtr &graph, const NodePtr &node, const vector<GeTensor> &outputs) {
OpDescPtr op_desc = MakeShared<ge::OpDesc>(graph->GetName() + "_" + NODE_NAME_NET_OUTPUT, NETOUTPUT);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "create OpDesc failed, graph:%s", graph->GetName().c_str());
GELOGE(FAILED, "[Create][OpDesc] failed, graph:%s", graph->GetName().c_str());
return FAILED;
}
(void)AttrUtils::SetBool(op_desc, "_is_single_op", true);
@@ -228,18 +244,23 @@ static Status AddOutputs(const ComputeGraphPtr &graph, const NodePtr &node, cons
for (const auto &out_desc : outputs) {
GeTensorDesc tensor = out_desc.GetTensorDesc();
TensorUtils::SetInputTensor(tensor, true);
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][InputDesc]fail for node:%s", op_desc->GetName().c_str());
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddInputDesc failed for node:%s", op_desc->GetName().c_str());
return FAILED, "[Add][InputDesc]fail for node:%s", op_desc->GetName().c_str());

TensorUtils::SetInputTensor(tensor, false);
TensorUtils::SetOutputTensor(tensor, true);
GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][OutputDesc]fail for node:%s", op_desc->GetName().c_str());
GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "AddOutputDesc failed for node:%s", op_desc->GetName().c_str());
return FAILED, "[Add][OutputDesc]fail for node:%s", op_desc->GetName().c_str());
count++;
}
GE_CHECK_NOTNULL_EXEC(graph, return PARAM_INVALID);
ge::NodePtr out_node = graph->AddNode(op_desc);
GE_CHK_BOOL_EXEC(out_node != nullptr, return FAILED,
GE_CHK_BOOL_EXEC(out_node != nullptr,
REPORT_CALL_ERROR("E19999", "add node:%s to graph:%u failed.",
op_desc->GetName().c_str(), graph->GetGraphID());
return FAILED,
"[Add][Node:%s]fail in graph:%u", op_desc->GetName().c_str(), graph->GetGraphID());
GE_CHECK_NOTNULL_EXEC(node, return PARAM_INVALID);
for (int32_t i = 0; i < count; ++i) {
@@ -256,7 +277,8 @@ static void GetOpsProtoPath(string &opsproto_path) {
string path = path_env;
string file_path = RealPath(path.c_str());
if (file_path.empty()) {
GELOGE(FAILED, "File path %s is invalid.", path.c_str());
REPORT_CALL_ERROR("E19999", "File path %s is invalid.", path.c_str());
GELOGE(FAILED, "[Call][RealPath] File path %s is invalid.", path.c_str());
return;
}
opsproto_path = (path + "/op_proto/custom/" + ":") + (path + "/op_proto/built-in/");
@@ -288,7 +310,8 @@ static Status ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTenso
int64_t storage_format = FORMAT_NCHW;
if (ge::AttrUtils::GetInt(desc, ge::ATTR_NAME_STORAGE_FORMAT, storage_format) &&
!ge::AttrUtils::SetListInt(desc, ge::ATTR_NAME_STORAGE_SHAPE, dynamic_shape_dims)) {
GELOGE(FAILED, "Set attr ATTR_NAME_STORAGE_SHAPE fail.");
REPORT_CALL_ERROR("E19999", "Set attr ATTR_NAME_STORAGE_SHAPE failed to op:%s.", desc.GetName().c_str());
GELOGE(FAILED, "[Set][Attr] ATTR_NAME_STORAGE_SHAPE fail.");
return FAILED;
}
desc.SetShape(dynamic_shape);
@@ -373,7 +396,8 @@ Status GeGenerator::Initialize(const map<string, string> &options) {
Status GeGenerator::Initialize(const map<string, string> &options, OmgContext &omg_context) {
impl_ = ge::MakeShared<Impl>(omg_context);
if (impl_ == nullptr) {
GELOGE(MEMALLOC_FAILED, "Make shared failed");
REPORT_CALL_ERROR("E19999", "create Impl failed.");
GELOGE(MEMALLOC_FAILED, "[Create][Impl] Make shared failed");
return MEMALLOC_FAILED;
}

@@ -388,7 +412,7 @@ Status GeGenerator::Initialize(const map<string, string> &options, OmgContext &o

Status ret = impl_->graph_manager_.Initialize(options);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, "Graph manager initialize failed.");
GELOGE(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, "[Call][Initialize] Graph manager initialize failed.");
return GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED;
}
// get ek file
@@ -430,7 +454,7 @@ Status GeGenerator::Finalize() {
GE_CHECK_NOTNULL_EXEC(impl_, return PARAM_INVALID);
Status ret = impl_->graph_manager_.Finalize();
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, "Graph manager finalize failed.");
GELOGE(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, "[Call][Finalize] Graph manager finalize failed.");
return GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED;
}
return SUCCESS;
@@ -454,9 +478,9 @@ Status GeGenerator::GenerateInfershapeGraph(const Graph &graph) {

Status ret = impl_->GenerateInfershapeGraph(graph);
if (ret != SUCCESS) {
GELOGE(ret, "Dump infershape json failed");
GELOGE(ret, "[Call][GenerateInfershapeGraph] Dump infershape json failed");
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
GELOGE(FAILED, "[Call][Finalize] graph_manager finalize fail.");
}
return ret;
}
@@ -653,9 +677,9 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
impl_->is_offline_ = is_offline;
Status ret = impl_->BuildModel(graph, inputs, ge_root_model);
if (ret != SUCCESS) {
GELOGE(ret, "Build model failed.");
GELOGE(ret, "[Build][Model] failed, ret:%d.", ret);
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
GELOGE(FAILED, "[Call][Finalize] graph_manager finalize fail.");
}
return ret;
}
@@ -679,7 +703,7 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
}
ret = impl_->SaveRootModel(file_name_prefix, ge_root_model, model);
if (ret != SUCCESS) {
GELOGE(ret, "Save model failed");
GELOGE(ret, "[Save][RootModel] failed, ret:%d, file:%s", ret, file_name_prefix.c_str());
if (impl_->graph_manager_.Finalize() != SUCCESS) {
GELOGE(FAILED, "graph_manager finalize fail.");
}
@@ -764,14 +788,16 @@ Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor>
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "inputs size" + FmtToStr(op_desc->GetAllInputsSize()),
"tensor size is " + FmtToStr(inputs.size())});
GELOGE(PARAM_INVALID, "Tensor size: %zu, Inputs size: %zu", inputs.size(), op_desc->GetAllInputsSize());
GELOGE(PARAM_INVALID, "[Check][Param] Tensor size: %zu, op:%s(%s) Inputs size: %zu, not equal",
inputs.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_desc->GetAllInputsSize());
return PARAM_INVALID;
}
if (!outputs.empty() && (outputs.size() != op_desc->GetOutputsSize())) {
ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
{op_desc->GetName(), op_desc->GetType(), "outputs size" + FmtToStr(op_desc->GetOutputsSize()),
"tensor size is " + FmtToStr(outputs.size())});
GELOGE(PARAM_INVALID, "Tensor size: %zu, Outputs size: %zu", outputs.size(), op_desc->GetOutputsSize());
GELOGE(PARAM_INVALID, "[Check][Param] Tensor size: %zu, op:%s(%s) Outputs size: %zu, not equal",
outputs.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_desc->GetOutputsSize());
return PARAM_INVALID;
}
return SUCCESS;
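
CheckForSingleOp only rejects a tensor list when it is non-empty and its size disagrees with the op description; an empty list means "use the op's own descs". A small sketch of that rule, with invented names:

// Sketch of the count rule above; CheckTensorCounts and its parameters are
// illustrative, not part of GeGenerator.
#include <cstddef>
#include <cstdio>

static bool CheckTensorCounts(size_t given_inputs, size_t expected_inputs,
                              size_t given_outputs, size_t expected_outputs) {
  // an empty tensor list is allowed; a non-empty one must match the op desc
  if (given_inputs != 0 && given_inputs != expected_inputs) {
    std::fprintf(stderr, "[Check][Param] Tensor size: %zu, Inputs size: %zu, not equal\n",
                 given_inputs, expected_inputs);
    return false;
  }
  if (given_outputs != 0 && given_outputs != expected_outputs) {
    std::fprintf(stderr, "[Check][Param] Tensor size: %zu, Outputs size: %zu, not equal\n",
                 given_outputs, expected_outputs);
    return false;
  }
  return true;
}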
@@ -786,7 +812,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
(void)AttrUtils::SetBool(op_desc, ATTR_SINGLE_OP_SCENE, true);

if (CheckForSingleOp(op_desc, inputs, outputs) != SUCCESS) {
GELOGE(PARAM_INVALID, "input param is invalid when build single op!");
GELOGE(PARAM_INVALID, "[Check][Param] input param is invalid when build single op:%s!",
op_desc->GetName().c_str());
return PARAM_INVALID;
}
OmgContext &omg_context = (impl_ == nullptr) ? domi::GetContext() : impl_->omg_context_;
@@ -805,6 +832,7 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
fuzz_compile_flag = true;
}
if (!AttrUtils::SetBool(op_desc, ATTR_NAME_FUZZ_BUILD, fuzz_compile_flag)) {
REPORT_CALL_ERROR("E19999", "set ATTR_NAME_FUZZ_BUILD failed for %s.", op_desc->GetName().c_str());
GELOGE(FAILED, "[Set][ATTR_NAME_FUZZ_BUILD] Failed to set attr for %s.", op_desc->GetName().c_str());
return FAILED;
}
@@ -813,7 +841,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
// 1. Create ComputeGraph.
string name = ge::CurrentTimeInStr() + "_" + model_file_name;
Graph graph;
GE_CHK_STATUS(BuildSingleOpGraph(op_desc, inputs, outputs, name, graph), "make graph fail.");
GE_CHK_STATUS(BuildSingleOpGraph(op_desc, inputs, outputs, name, graph),
"[Build][Graph] for single op:%s fail.", op_desc->GetName().c_str());

// 2. check engine type when compile online
if (model_file_name == kFileNameSuffix) {
@@ -838,7 +867,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GE_CHECK_NOTNULL(ge_root_model->GetRootGraph());
map<string, GeModelPtr> name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel();
if (name_to_ge_model.empty()) {
GELOGE(PARAM_INVALID, "GetSubgraphInstanceNameToModel is empty.");
REPORT_CALL_ERROR("E19999", "GetSubgraphInstanceNameToModel failed.");
GELOGE(PARAM_INVALID, "[Get][Name] GetSubgraphInstanceNameToModel is empty.");
return PARAM_INVALID;
}
const ComputeGraphPtr root_graph = ge_root_model->GetRootGraph();
@@ -869,7 +899,11 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
}
if (!fuzz_build_attrs.empty()) {
GE_CHK_BOOL_EXEC(AttrUtils::SetListNamedAttrs(ge_model, ATTR_NAME_FUZZ_BUILD_RES_ATTRS, fuzz_build_attrs),
return FAILED, "Set ATTR_NAME_FUZZ_BUILD_RES_ATTRS failed.");
REPORT_CALL_ERROR("E19999", "Set model:%s(id:%u) attr:%s failed.",
ge_model->GetName().c_str(), ge_model->GetModelId(),
ATTR_NAME_FUZZ_BUILD_RES_ATTRS.c_str());
return FAILED, "Set model:%s(id:%u) attr:%s failed.",
ge_model->GetName().c_str(), ge_model->GetModelId(), ATTR_NAME_FUZZ_BUILD_RES_ATTRS.c_str());
}
GE_CHK_STATUS_RET_NOLOG(impl_->SaveParams(ge_model, op_desc_tmp->GetType(), op_attrs, inputs, outputs));
} else {
@@ -998,7 +1032,7 @@ Status GeGenerator::Impl::SaveModel(const string &file_name_prefix, GeModelPtr &
model_helper.SetSaveMode(is_offline_);
Status ret = model_helper.SaveToOmModel(model, save_param_, file_name_prefix, model_buff);
if (ret != SUCCESS) {
GELOGE(ret, "Save to om model failed");
GELOGE(ret, "[Call][SaveToOmModel] Save to om model failed");
return ret;
}
return SUCCESS;
@@ -1009,12 +1043,15 @@ Status GeGenerator::Impl::SaveRootModel(const string &file_name_prefix, GeRootMo
bool is_unknown_shape = false;
auto ret = ge_root_model->CheckIsUnknownShape(is_unknown_shape);
if (ret != SUCCESS) {
GELOGE(FAILED, "Check root model is unkonwn shape failed");
REPORT_CALL_ERROR("E19999", "root model(id:%u) CheckIsUnknownShape failed, ret:%d",
ge_root_model->GetModelId(), ret);
GELOGE(FAILED, "[Check][RootModel] is unkonwn shape failed, ret:%d", ret);
return FAILED;
}
GELOGD("begin save root model, cur model is unkonwn shape model ? : %d", is_unknown_shape);
GE_CHK_BOOL_EXEC(!ge_root_model->GetSubgraphInstanceNameToModel().empty(), return FAILED,
"ge root model has no sub model")
GE_CHK_BOOL_EXEC(!ge_root_model->GetSubgraphInstanceNameToModel().empty(),
REPORT_CALL_ERROR("E19999", "root model(id:%u) has no sub model.", ge_root_model->GetModelId());
return FAILED, "[Get][SubModel] ge root model has no sub model")
GeModelPtr model_root = nullptr;
if (is_unknown_shape) {
auto name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel();
@@ -1038,7 +1075,8 @@ Status GeGenerator::Impl::SaveRootModel(const string &file_name_prefix, GeRootMo
model_helper.SetSaveMode(is_offline_);
ret = model_helper.SaveToOmRootModel(ge_root_model, save_param_, file_name_prefix, model_buff, is_unknown_shape);
if (ret != SUCCESS) {
GELOGE(ret, "Save to om model failed");
REPORT_CALL_ERROR("E19999", "SaveToOmRootModel failed, ret:%d, model id:%u", ret, ge_root_model->GetModelId());
GELOGE(ret, "[Call][SaveToOmRootModel] failed, ret:%d, model id:%u", ret, ge_root_model->GetModelId());
return ret;
}
return SUCCESS;
@@ -1051,7 +1089,8 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector<GeTensor>
const std::map<std::string, std::string> options;
Status ret = graph_manager_.AddGraph(graph_id, graph, options, omg_context_);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "GraphManager add graph fail, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "add graph(id:%u) failed, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "[Add][Graph] fail, graph id: %u", graph_id);
(void)graph_manager_.Finalize();
return GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED;
}
@@ -1075,7 +1114,8 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector<GeTensor>

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "GraphManager build graph fail, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "build graph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "[Build][Graph] fail, graph id: %u", graph_id);
ret = GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
}

@@ -1091,14 +1131,17 @@ Status GeGenerator::Impl::GenerateInfershapeGraph(const Graph &graph) {
const std::map<std::string, std::string> options;
Status ret = graph_manager_.AddGraph(graph_id, graph, options, omg_context_);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "GraphManager add graph failed, graph id: %u", graph_id);
REPORT_CALL_ERROR("E19999", "add graph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, "[Add][Graph] failed, graph id: %u", graph_id);
(void)graph_manager_.Finalize();
return GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED;
}

ret = graph_manager_.GenerateInfershapeGraph(graph_id);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "GraphManager generate graph failed");
REPORT_CALL_ERROR("E19999", "GenerateInfershapeGraph failed, graph id:%u, ret:%d", graph_id, ret);
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED,
"[Generate][Graph] failed, graph id:%u, ret:%d", graph_id, ret);
return GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
}
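
Both BuildModel and GenerateInfershapeGraph repeat the same cleanup: once AddGraph has succeeded, any later failure finalizes the graph manager before returning. The sketch below shows a scope-guard way of expressing that cleanup; it is an illustration only, the committed code keeps the explicit graph_manager_.Finalize() calls.

// Illustrative scope guard for "finalize on any early error return"; not part
// of the GE codebase.
#include <functional>
#include <utility>

class ScopeGuard {
 public:
  explicit ScopeGuard(std::function<void()> on_exit) : on_exit_(std::move(on_exit)) {}
  ~ScopeGuard() {
    if (armed_) {
      on_exit_();  // runs on every early return taken while still armed
    }
  }
  void Dismiss() { armed_ = false; }  // call once the build has succeeded
  ScopeGuard(const ScopeGuard &) = delete;
  ScopeGuard &operator=(const ScopeGuard &) = delete;

 private:
  std::function<void()> on_exit_;
  bool armed_ = true;
};

A caller would construct the guard right after AddGraph succeeds and call Dismiss() on the success path, so the finalize logic lives in one place instead of in every error branch.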



+22 -15  ge/generator/generator_api.cc

@@ -23,22 +23,24 @@
#include "graph/op_desc.h"
#include "graph/utils/tensor_utils.h"

#define CHECK_PARAM_NOT_NULL(param) \
do { \
if (param == nullptr) { \
GELOGE(ge::PARAM_INVALID, "Param: %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
#define CHECK_PARAM_NOT_NULL(param) \
do { \
if (param == nullptr) { \
REPORT_INNER_ERROR("E19999", "param:%s is null", #param); \
GELOGE(ge::PARAM_INVALID, "[Check][Param] %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
} while (0)

#define CHECK_PARAM_OBJECT(object, param) \
({ \
object *obj_value = reinterpret_cast<object *>(param); \
if (obj_value == nullptr) { \
GELOGE(ge::PARAM_INVALID, "Param: %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
obj_value; \
#define CHECK_PARAM_OBJECT(object, param) \
({ \
object *obj_value = reinterpret_cast<object *>(param); \
if (obj_value == nullptr) { \
REPORT_INNER_ERROR("E19999", "param:%s is null.", #param); \
GELOGE(ge::PARAM_INVALID, "[Check][Param] %s is null.", #param); \
return ge::PARAM_INVALID; \
} \
obj_value; \
})
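
The reworked macros above now file an E19999 inner-error record before logging and returning PARAM_INVALID. The snippet below is a hypothetical caller; it assumes the GE logging macros the two checks rely on are in scope, and OpTensorStub / CheckTensorArgs are invented names, not part of generator_api.cc.

// Hypothetical usage of CHECK_PARAM_NOT_NULL / CHECK_PARAM_OBJECT as defined
// above; OpTensorStub is a stand-in struct, not the real OpTensor_t layout.
struct OpTensorStub {
  int format;
  int datatype;
};

ge::Status CheckTensorArgs(void *tensor_handle, const char *op_type) {
  CHECK_PARAM_NOT_NULL(op_type);                                           // rejects a null op type
  OpTensorStub *tensor = CHECK_PARAM_OBJECT(OpTensorStub, tensor_handle);  // rejects a null handle
  (void)tensor;  // both parameters are validated; safe to dereference from here
  return ge::SUCCESS;
}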

class OpAttr {
@@ -118,6 +120,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
std::string op_name = std::string(op_type) + "_" + std::to_string(ge::GetCurrentTimestamp());
ge::OpDescPtr op_desc = ge::MakeShared<ge::OpDesc>(op_name, op_type);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "MakeShared ge::OpDesc failed, as return nullptr");
GELOGE(ge::FAILED, "[Call][MakeShared] create ge::OpDesc failed.");
return ge::FAILED;
}
std::vector<ge::GeTensor> inputs;
@@ -132,7 +136,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
ge::TensorUtils::SetOutputTensor(tensor_desc, false);

if (op_desc->AddInputDesc(tensor_desc) != ge::GRAPH_SUCCESS) {
GELOGE(ge::FAILED, "AddInputDesc fail.");
REPORT_CALL_ERROR("E19999", "add inputdesc failed, op:%s", op_desc->GetName().c_str());
GELOGE(ge::FAILED, "[Add][InputDesc] fail, op:%s.", op_desc->GetName().c_str());
return ge::FAILED;
}
inputs.emplace_back(tensor_desc);
@@ -157,6 +162,8 @@ Status_t OpTaskGernerator(const char *op_type, const OpTensor_t *in_tensor, int
OpAttr *op_attr = CHECK_PARAM_OBJECT(OpAttr, attr);
for (const auto &it : op_attr->Attrs()) {
GE_IF_BOOL_EXEC(op_desc->SetAttr(it.first, it.second) != ge::SUCCESS, GELOGE(ge::FAILED, "SetAttr failed.");
REPORT_CALL_ERROR("E19999", "set attr:%s failed, op:%s",
it.first.c_str(), op_desc->GetName().c_str());
return ge::FAILED);
}
}


+60 -56  ge/graph/build/graph_builder.cc

@@ -79,7 +79,8 @@ Status HandleSubgraphDataNode(NodePtr &src_node, OutDataAnchorPtr &src_out_ancho
if (!AttrUtils::GetInt(src_node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, index)) {
REPORT_INNER_ERROR("E19999", "get attr:%s failed from node:%s",
ATTR_NAME_PARENT_NODE_INDEX.c_str(), src_node->GetName().c_str());
GELOGE(FAILED, "Get attr ATTR_NAME_PARENT_NODE_INDEX failed, node:%s.", src_node->GetName().c_str());
GELOGE(FAILED, "[Get][Attr] %s failed, node:%s.", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
src_node->GetName().c_str());
return FAILED;
}
const NodePtr &parent_node = src_node->GetOwnerComputeGraph()->GetParentNode();
@@ -113,7 +114,8 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
REPORT_INNER_ERROR("E19999", "check gelib instance null, graph:%s",
graph->GetName().c_str());
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GraphBuilder: GE is not initialized");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Check][GELib] GraphBuilder: GE is not initialized, graph:%s",
graph->GetName().c_str());
return GE_CLI_GE_NOT_INITIALIZED;
}

@@ -127,7 +129,7 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
if (kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "op kernel lib is empty in node:%s(%s)",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node_ptr->GetName().c_str(),
GELOGE(INTERNAL_ERROR, "[Get][KernelLibName] of node:%s(%s) failed.", node_ptr->GetName().c_str(),
node_ptr->GetType().c_str());
return INTERNAL_ERROR;
}
@@ -137,7 +139,7 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set node:%s(%s) inputDesc size failed",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(ret, "Set node inputDesc size failed, node name is %s", node_ptr->GetName().c_str());
GELOGE(ret, "[Set][InputSize] to node:%s failed.", node_ptr->GetName().c_str());
return ret;
}

@@ -145,7 +147,7 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call Calculate op:%s(%s) running param failed",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(ret, "Calculate op running param failed, node name is %s", node_ptr->GetName().c_str());
GELOGE(ret, "[Call][Calculate] op running param failed, node name is %s", node_ptr->GetName().c_str());
return ret;
}
GE_CHK_STATUS_RET(AddOutputMemTypeForNode(node_ptr));
@@ -202,7 +204,7 @@ Status GraphBuilder::UpdateParentNodeOutputSize(const ge::ComputeGraphPtr &graph
Status GraphBuilder::Build(ComputeGraphPtr &comp_graph, GeRootModelPtr &ge_root_model_ptr, uint64_t session_id) {
if (comp_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "check compute_graph nullptr, session_id:%lu", session_id);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "Graph build comp_graph is null.");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] comp_graph is null, session_id:%lu", session_id);
return GE_GRAPH_PARAM_NULLPTR;
}
ge_root_model_ptr = MakeShared<ge::GeRootModel>(comp_graph);
@@ -216,12 +218,13 @@ Status GraphBuilder::Build(ComputeGraphPtr &comp_graph, GeRootModelPtr &ge_root_
if (is_dynamic_shape || comp_graph->GetGraphUnknownFlag()) {
GE_CHK_STATUS_RET(
BuildForDynamicShapeGraph(comp_graph, ge_root_model_ptr, ge_model_ptr, session_id),
"Build for dynamic shape graph failed.");
"[Build][DynamicShapeGraph] failed, graph:%s, session id:%lu.", comp_graph->GetName().c_str(), session_id);
return SUCCESS;
}

GE_CHK_STATUS_RET(BuildForKnownShapeGraph(comp_graph, ge_model_ptr, session_id),
"Build for known shape graph failed.");
"[Build][KnownShapeGraph] failed, graph:%s, session id:%lu.",
comp_graph->GetName().c_str(), session_id);
ge_root_model_ptr->SetSubgraphInstanceNameToModel(comp_graph->GetName(), ge_model_ptr);
return SUCCESS;
}
@@ -229,28 +232,29 @@ Status GraphBuilder::Build(ComputeGraphPtr &comp_graph, GeRootModelPtr &ge_root_
Status GraphBuilder::BuildForKnownShapeGraph(ComputeGraphPtr &comp_graph,
GeModelPtr &ge_model_ptr, uint64_t session_id) {
if (ge::GetContext().GetHostExecFlag()) {
GE_CHK_STATUS_RET(BuildForHostCpuGraph(comp_graph, ge_model_ptr, session_id), "Build for host-cpu graph failed.");
GE_CHK_STATUS_RET(BuildForHostCpuGraph(comp_graph, ge_model_ptr, session_id),
"[Build][HostCpuGraph] failed, graph:%s, session id:%lu.",
comp_graph->GetName().c_str(), session_id);
return SUCCESS;
}

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kPreBuild);
GELOGI("Begin to build known shape graph[%s].", comp_graph->GetName().c_str());
Status ret = SecondPartition(comp_graph);
GE_CHK_STATUS_RET(ret, "Graph[%s] second partition Failed.", comp_graph->GetName().c_str());
GE_CHK_STATUS_RET(ret, "[Call][SecondPartition] for Graph[%s] failed.", comp_graph->GetName().c_str());
auto subgraph_map = graph_partitioner_.GetSubGraphMap();

GE_TIMESTAMP_START(BuildSubgraph);
ge::ModelBuilder builder(session_id, comp_graph, subgraph_map, stream_max_parallel_num_, hcom_parallel_, build_mode_);
GE_DUMP(comp_graph, "BeforePreBuildModel");
GE_TIMESTAMP_START(PreBuildModel);
GE_CHK_STATUS_RET(builder.PreBuildModel(), "Graph[%s] builder PreBuildModel() return fail.",
GE_CHK_STATUS_RET(builder.PreBuildModel(), "[PreBuild][Model] failed, Graph[%s].",
comp_graph->GetName().c_str());
GE_TIMESTAMP_END(PreBuildModel, "GraphBuilder::PreBuildModel");

GE_DUMP(comp_graph, "AfterPreBuildModel");
GE_TIMESTAMP_START(CalcOpParam);
GE_CHK_STATUS_RET(CalcOpParam(comp_graph), "Graph[%s] builder CalcOpParam() return fail.",
comp_graph->GetName().c_str());
GE_CHK_STATUS_RET(CalcOpParam(comp_graph), "[Calc][OpParam] fail, Graph[%s].", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(CalcOpParam, "GraphBuilder::CalcOpParam");
GE_DUMP(comp_graph, "AfterCalcOpParam");

@@ -259,7 +263,7 @@ Status GraphBuilder::BuildForKnownShapeGraph(ComputeGraphPtr &comp_graph,
return MEMALLOC_FAILED;
}
GE_TIMESTAMP_START(BuildModelForGetTask);
GE_CHK_STATUS_RET(builder.BuildModelForGetTask(*model_ptr), "Graph[%s] builder BuildModelForGetTask() return fail.",
GE_CHK_STATUS_RET(builder.BuildModelForGetTask(*model_ptr), "[Build][Model] ForGetTask fail, Graph[%s].",
comp_graph->GetName().c_str());
GE_TIMESTAMP_END(BuildModelForGetTask, "GraphBuilder::BuildModelForGetTask");
GE_DUMP(comp_graph, "AfterBuildModel");
@@ -270,7 +274,7 @@ Status GraphBuilder::BuildForKnownShapeGraph(ComputeGraphPtr &comp_graph,
GE_TIMESTAMP_END(GetTaskInfo, "GraphBuilder::GetTaskInfo");
GE_DUMP(comp_graph, "AfterGetTask");
if (ret != SUCCESS) {
GELOGE(ret, "Graph[%s] builder GetTaskInfo() return fail.", comp_graph->GetName().c_str());
GELOGE(ret, "[Get][TaskInfo] fail, Graph[%s].", comp_graph->GetName().c_str());
return ret;
}

@@ -280,7 +284,7 @@ Status GraphBuilder::BuildForKnownShapeGraph(ComputeGraphPtr &comp_graph,
return MEMALLOC_FAILED;
}
GE_CHK_STATUS_RET(builder.SaveDataToModel(*model_ptr, *ge_model_ptr),
"Graph[%s] builder SaveDataToModel() return fail.", comp_graph->GetName().c_str());
"[Save][Data] ToModel fail, Graph[%s].", comp_graph->GetName().c_str());
GELOGD("Success to build graph[%s] model.", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(BuildSubgraph, "GraphBuilder::Build");
return SUCCESS;
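
The known-shape path above is a fixed chain of status-returning stages (SecondPartition, PreBuildModel, CalcOpParam, BuildModelForGetTask, GetTaskInfo, SaveDataToModel), each wrapped in GE_CHK_STATUS_RET so the first failure stops the build and logs a bracketed message. A stripped-down sketch of that chaining, with invented Stage / RunPipeline types:

// Minimal sketch of chaining status-returning build stages; Stage and
// RunPipeline are illustrative only, not GE types.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

using Status = int;
constexpr Status kSuccess = 0;

struct Stage {
  std::string name;
  std::function<Status()> run;
};

Status RunPipeline(const std::vector<Stage> &stages) {
  for (const auto &stage : stages) {
    Status ret = stage.run();
    if (ret != kSuccess) {
      std::fprintf(stderr, "[Build][%s] failed, ret:%d\n", stage.name.c_str(), ret);
      return ret;  // stop at the first failing stage, like GE_CHK_STATUS_RET
    }
  }
  return kSuccess;
}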
@@ -315,7 +319,7 @@ Status GraphBuilder::SetConstantInputOffset(ComputeGraphPtr &comp_graph) {
if (weights.empty()) {
REPORT_INNER_ERROR("E19999", "check weights size of node %s(%s) is empty",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
GELOGE(FAILED, "[Check][Param] weights size of node %s is empty", node->GetName().c_str());
return FAILED;
}
GeTensorPtr weight = weights[0];
@@ -342,23 +346,21 @@ Status GraphBuilder::BuildForUnknownShapeGraph(ComputeGraphPtr &comp_graph, GeMo
ge::ModelBuilder builder(session_id, comp_graph, subgraph_map, stream_max_parallel_num_, hcom_parallel_, build_mode_);
GE_DUMP(comp_graph, "BeforePreBuildModel");
GE_TIMESTAMP_START(PreBuildModel);
GE_CHK_STATUS_RET(builder.PreBuildModel(), "Graph[%s] builder PreBuildModel() return fail.",
comp_graph->GetName().c_str());
GE_CHK_STATUS_RET(builder.PreBuildModel(), "[PreBuild][Model] fail, Graph[%s].", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(PreBuildModel, "GraphBuilder::PreBuildModel");
GE_DUMP(comp_graph, "AfterPreBuildModel");

GE_TIMESTAMP_START(CalcOpParam);
GE_CHK_STATUS_RET(CalcOpParam(comp_graph), "Graph[%s] builder CalcOpParam() return fail.",
comp_graph->GetName().c_str());
GE_CHK_STATUS_RET(CalcOpParam(comp_graph), "[Calc][OpParam] fail, Graph[%s].", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(CalcOpParam, "GraphBuilder::CalcOpParam");
GE_DUMP(comp_graph, "AfterCalcOpParam");

GE_TIMESTAMP_START(SetConstantInputOffset);
GE_CHK_STATUS_RET(SetConstantInputOffset(comp_graph),
"Graph[%s] failed to set constant input offset.", comp_graph->GetName().c_str());
"[Set][Offset] Graph[%s] failed to set constant input offset.", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(SetConstantInputOffset, "GraphBuilder::SetConstantInputOffset");
GE_TIMESTAMP_START(MergeWeights);
GE_CHK_STATUS_RET(builder.MergeWeights(), "Graph[%s] failed to merge weights.", comp_graph->GetName().c_str());
GE_CHK_STATUS_RET(builder.MergeWeights(), "[Merge][Weights] failed for Graph[%s].", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(MergeWeights, "GraphBuilder::MergeWeights");

ModelPtr model_ptr = MakeShared<ge::Model>();
@@ -367,7 +369,7 @@ Status GraphBuilder::BuildForUnknownShapeGraph(ComputeGraphPtr &comp_graph, GeMo
}
GE_TIMESTAMP_START(BuildModelForGetDynShapeTask);
GE_CHK_STATUS_RET(builder.BuildModelForGetDynShapeTask(*model_ptr),
"Graph[%s] builder BuildModelForGetDynShapeTask() return fail.", comp_graph->GetName().c_str());
"[Build][Model] ForGetDynShapeTask fail, Graph[%s].", comp_graph->GetName().c_str());
GE_TIMESTAMP_END(BuildModelForGetDynShapeTask, "GraphBuilder::BuildModelForGetDynShapeTask");
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kTaskGenerate);
GE_TIMESTAMP_START(GetTaskInfo);
@@ -378,7 +380,7 @@ Status GraphBuilder::BuildForUnknownShapeGraph(ComputeGraphPtr &comp_graph, GeMo
GraphUtils::DumpGEGraph(comp_graph, "AfterGetTask");
GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterGetTask");
if (ret != SUCCESS) {
GELOGE(ret, "Graph[%s] builder GetTaskInfo() return fail.", comp_graph->GetName().c_str());
GELOGE(ret, "[Get][TaskInfo] fail, Graph[%s].", comp_graph->GetName().c_str());
return ret;
}
ge_model_ptr = MakeShared<ge::GeModel>();
@@ -386,7 +388,7 @@ Status GraphBuilder::BuildForUnknownShapeGraph(ComputeGraphPtr &comp_graph, GeMo
return MEMALLOC_FAILED;
}
GE_CHK_STATUS_RET(builder.SaveDataToModel(*model_ptr, *ge_model_ptr),
"Graph[%s] builder SaveDataToModel() return fail.", comp_graph->GetName().c_str());
"[Save][Data] ToModel fail, Graph[%s].", comp_graph->GetName().c_str());
GELOGD("Success to build graph[%s] model.", comp_graph->GetName().c_str());
return SUCCESS;
}
@@ -433,8 +435,9 @@ Status GraphBuilder::MarkFpBpProfilingTaskAttr(ComputeGraphPtr &com_graph) {
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
return FAILED);
GELOGE(FAILED, "[Check][Param] Multiply result is out of range, node:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
int64_t log_id = i * kProfilingArStep + kProfilingArStartLogid;
(void)ge::AttrUtils::SetInt(op_desc, ATTR_NAME_INSERT_PROFILILNG_TASK_LOG_ID, log_id);
continue;
@@ -458,14 +461,14 @@ Status GraphBuilder::BuildForDynamicShapeGraph(ComputeGraphPtr &comp_graph,
GE_CHECK_NOTNULL(op_desc);
op_desc->SetStreamId(kInvalidStream);
if (node->GetType() == DATA) {
GE_CHK_STATUS_RET(CalcDynShapeRootGraphDataSize(op_desc), "Calc dynamic shape root graph data[%s] size failed.",
GE_CHK_STATUS_RET(CalcDynShapeRootGraphDataSize(op_desc), "[Calc][DynShapeRootGraphDataSize] failed, op:%s.",
op_desc->GetName().c_str());
}
}

// Set fp bp profiling task attr for graph
if (MarkFpBpProfilingTaskAttr(comp_graph) != SUCCESS) {
GELOGE(FAILED, "Set fp bp profiling task attr for graph.");
GELOGE(FAILED, "[Mark][TaskAttr]Set fp bp profiling task attr for graph:%s failed.", comp_graph->GetName().c_str());
return FAILED;
}

@@ -482,18 +485,20 @@ Status GraphBuilder::BuildForDynamicShapeGraph(ComputeGraphPtr &comp_graph,
if (sub_graph->GetGraphUnknownFlag()) {
// unknown shape build flow
GE_CHK_STATUS_RET(BuildForUnknownShapeGraph(sub_graph, ge_model_ptr, session_id),
"Build for unknown shape graph failed.");
"[Build][Graph] as unknown shape failed, session id:%lu.", session_id);
} else {
// reset functional subgraph parent graph as known subgraph
for (const auto &node : sub_graph->GetDirectNode()) {
for (const auto &sub_graph_name : node->GetOpDesc()->GetSubgraphInstanceNames()) {
auto sub_sub_graph = comp_graph->GetSubgraph(sub_graph_name);
GE_CHK_STATUS_RET(sub_graph->AddSubgraph(sub_sub_graph), "Failed add subgraph to known graph.");
GE_CHK_STATUS_RET(sub_graph->AddSubgraph(sub_sub_graph),
"[Add][SubGraph] %s to known graph:%s failed.", sub_sub_graph->GetName().c_str(),
sub_graph->GetName().c_str());
}
}
// known shape build flow
GE_CHK_STATUS_RET(BuildForKnownShapeGraph(sub_graph, ge_model_ptr, session_id),
"Build for known shape graph failed.");
"[Build][Graph] for known shape failed, session id:%lu.", session_id);
}
ge_root_model_ptr->SetSubgraphInstanceNameToModel(sub_graph->GetName(), ge_model_ptr);
}
@@ -510,19 +515,20 @@ Status GraphBuilder::GetTaskInfo(const ge::ModelBuilder &builder, const ModelPtr
int64_t memory_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_MEMORY_SIZE, memory_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_MEMORY_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get memory size fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] memory size fail, graph:%s, session id:%lu.", comp_graph->GetName().c_str(),
session_id);
return INTERNAL_ERROR;
}
int64_t p2p_memory_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_memory_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get p2p memory size fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s fail in model", ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
return INTERNAL_ERROR;
}
int64_t weight_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_WEIGHT_SIZE, weight_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_WEIGHT_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get weight memory size fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s fail in model", ATTR_MODEL_WEIGHT_SIZE.c_str());
return INTERNAL_ERROR;
}
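
GetTaskInfo treats the three model attributes (memory size, p2p memory size, weight size) as required: a missing one is an internal error. Below is a sketch of folding those three reads into one hypothetical helper built on the ge::AttrUtils::GetInt call shown above; it assumes the GE headers and logging macros are in scope, and GetRequiredInt is not part of GraphBuilder.

// Hypothetical helper wrapping the AttrUtils::GetInt + report-and-fail pattern
// used three times above.
static bool GetRequiredInt(const ge::ModelPtr &model, const std::string &name, int64_t &value) {
  if (!ge::AttrUtils::GetInt(model, name, value)) {
    REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", name.c_str());
    GELOGE(ge::INTERNAL_ERROR, "[Get][Attr] %s fail in model", name.c_str());
    return false;
  }
  return true;
}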

@@ -548,20 +554,20 @@ Status GraphBuilder::GetTaskInfo(const ge::ModelBuilder &builder, const ModelPtr
Status ret = run_context.InitMemInfo(get_mem_base, memory_size, mem_type_to_data_mem_base, mem_type_to_data_mem_size,
get_weight_mem_base, weight_size);
if (ret != SUCCESS) {
GELOGE(ret, "task_generator init mem info fail.");
GELOGE(ret, "[Init][MemInfo] fail, ret:%d.", ret);
return ret;
}
auto weight_buffer = builder.GetWeightBuffer();
ret = run_context.CreateRunContext(*model_ptr, comp_graph, weight_buffer, session_id);
if (ret != SUCCESS) {
GELOGE(ret, "runContext create run context fail.");
GELOGE(ret, "[Create][RunContext] fail, ret:%d, graph:%s.", ret, comp_graph->GetName().c_str());
return ret;
}

StreamGraphOptimizer stream_optimizer;
ret = stream_optimizer.OptimizeStreamedSubGraph(comp_graph, subgraph_map, run_context.GetRunContext());
if (ret != SUCCESS) {
GELOGE(ret, "Optimize streamed subGraph fail.");
GELOGE(ret, "[Optimize][StreamedSubGraph] fail, graph:%s.", comp_graph->GetName().c_str());
return ret;
}
GE_DUMP(comp_graph, "AfterOptimizeStreamedSubGraph");
@@ -578,13 +584,13 @@ Status GraphBuilder::SetInputSize(const ge::NodePtr &node_ptr) {
if (node_ptr->GetType() == DATA) {
bool is_unknown_shape = false;
GE_CHK_STATUS_RET(ge::NodeUtils::GetNodeUnknownShapeStatus(*node_ptr, is_unknown_shape),
"Get data node[%s] shape status failed!", node_ptr->GetName().c_str());
"[Get][Status] of data node[%s] shape failed!", node_ptr->GetName().c_str());
if (is_unknown_shape) {
GELOGD("data node: %s is unknown shape, do not set input size!", node_ptr->GetName().c_str());
return SUCCESS;
}
if (UpdateDataInputSize(node_ptr) != SUCCESS) {
GELOGE(FAILED, "Update data input size failed.");
GELOGE(FAILED, "[Update][Data] input size failed, node:%s.", node_ptr->GetName().c_str());
return FAILED;
}
}
@@ -632,7 +638,7 @@ Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
const auto &op_desc = node_ptr->GetOpDesc();
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "check op_desc is nullptr");
GELOGE(FAILED, "Op desc is nullptr.");
GELOGE(FAILED, "[Check][Param] Op desc is nullptr.");
return FAILED;
}
// data op only has one output anchor
@@ -651,7 +657,7 @@ Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:0",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Get tensor size in bytes failed.");
GELOGE(FAILED, "[Get][TensorSize] in bytes failed, op:%s.", op_desc->GetName().c_str());
return FAILED;
}
// data op only has one input anchor
@@ -660,7 +666,7 @@ Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
if (op_desc->UpdateInputDesc(0, input_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update input desc size failed for op:%s(%s) index:0",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Update input desc size failed.");
GELOGE(FAILED, "[Update][InputDesc] failed, op:%s.", op_desc->GetName().c_str());
return FAILED;
}
}
@@ -690,7 +696,7 @@ Status GraphBuilder::CalcDynShapeRootGraphDataSize(const ge::OpDescPtr &op_desc)
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:0 ",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Get tensor size in bytes failed.");
GELOGE(FAILED, "[Get][TensorSize] in bytes failed, op:%s.", op_desc->GetName().c_str());
return FAILED;
}

@@ -699,7 +705,7 @@ Status GraphBuilder::CalcDynShapeRootGraphDataSize(const ge::OpDescPtr &op_desc)
if (op_desc->UpdateOutputDesc(0, output_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update output desc size failed for op:%s(%s) index:0 ",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Update dynamic shape graph data output desc size failed.");
GELOGE(FAILED, "[Update][OutputDesc] for dynamic shape graph data failed, op:%s.", op_desc->GetName().c_str());
return FAILED;
}
}
@@ -710,15 +716,13 @@ Status GraphBuilder::SecondPartition(ge::ComputeGraphPtr &comp_graph) {
GE_TIMESTAMP_START(GraphPartition2);
auto ret = graph_partitioner_.Partition(comp_graph, GraphPartitioner::kSecondPartitioning);
if (ret != SUCCESS) {
GELOGE(ret, "Graph partition Failed");
GELOGE(ret, "[Call][Partition] for Graph Failed");
return ret;
}
GE_CHK_STATUS_RET(ret, "Graph partition Failed.");
const auto &graph_2_subgraphlist = graph_partitioner_.GetSubGraphMap();
if (graph_2_subgraphlist.find(comp_graph) == graph_2_subgraphlist.end()) {
REPORT_INNER_ERROR("E19999", "find subgraphlis in graph:%s failed",
comp_graph->GetName().c_str());
GELOGE(FAILED, "Find subgraph failed.");
REPORT_INNER_ERROR("E19999", "find subgraphlis in graph:%s failed", comp_graph->GetName().c_str());
GELOGE(FAILED, "[Check][Param] Find subgraph graph:%s failed.", comp_graph->GetName().c_str());
return FAILED;
}
GE_TIMESTAMP_END(GraphPartition2, "GraphPartitioner::Partition2");
@@ -749,18 +753,18 @@ Status GraphBuilder::AddOutputMemTypeForNode(const NodePtr &node) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for node:%s(%s) out_index:%u failed",
ATTR_OUTPUT_MEMORY_TYPE.c_str(), src_desc->GetName().c_str(), src_desc->GetType().c_str(),
src_out_anchor->GetIdx());
GELOGE(INTERNAL_ERROR, "Set out_memory_type attr for [%s:%d] failed.", src_desc->GetName().c_str(),
GELOGE(INTERNAL_ERROR, "[Set][Attr] out_memory_type for [%s:%d] failed.", src_desc->GetName().c_str(),
src_out_anchor->GetIdx());
return INTERNAL_ERROR;
}
switch (TransferNodeType(src_node)) {
case kSubgraphNode:
GE_CHK_STATUS_RET(HandleSubgraphNode(src_node, src_out_anchor), "Handle subgraph node %s failed",
src_node->GetName().c_str());
GE_CHK_STATUS_RET(HandleSubgraphNode(src_node, src_out_anchor),
"[Handle][Node] %s in subgraph failed", src_node->GetName().c_str());
break;
case kSubgraphData:
GE_CHK_STATUS_RET(HandleSubgraphDataNode(src_node, src_out_anchor), "Handle Data node %s in subgraph failed",
src_node->GetName().c_str());
GE_CHK_STATUS_RET(HandleSubgraphDataNode(src_node, src_out_anchor),
"[Handle][DataNode] %s in subgraph failed", src_node->GetName().c_str());
break;
case kOthers:
default:


+6 -6  ge/graph/build/label_allocator.cc

@@ -29,7 +29,7 @@ LabelAllocator::LabelAllocator(const ComputeGraphPtr &graph) : compute_graph_(gr
Status LabelAllocator::AssignFunctionalLabels() {
if (compute_graph_ == nullptr) {
REPORT_INNER_ERROR("E19999", "check param compute_graph nullptr");
GELOGE(INTERNAL_ERROR, "ComputeGraph not set, Assign labels failed.");
GELOGE(INTERNAL_ERROR, "[Check][Param] ComputeGraph not set, Assign labels failed.");
return INTERNAL_ERROR;
}

@@ -49,14 +49,14 @@ Status LabelAllocator::AssignFunctionalLabels() {
if (maker == nullptr) {
REPORT_CALL_ERROR("E19999", "Check Node:%s(%s) label maker not registed",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Node: %s label maker not registed.", node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "[Create][LabelMaker] Node: %s label maker not registed.", node->GetType().c_str());
return INTERNAL_ERROR;
}

if (maker->Run(label_index) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Node:%s(%s) run label maker failed",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Node: %s run label maker failed.", node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "[Call][Run] Node: %s run label maker failed.", node->GetType().c_str());
return INTERNAL_ERROR;
}
}
@@ -69,7 +69,7 @@ Status LabelAllocator::AssignFunctionalLabels() {
bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::set<NodePtr> &functional_nodes) {
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "check param compute_graph nullptr");
GELOGE(INTERNAL_ERROR, "Sub ComputeGraph is null.");
GELOGE(INTERNAL_ERROR, "[Check][Param] Sub ComputeGraph is null.");
return false;
}

@@ -82,7 +82,7 @@ bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::set<Node
if (func_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Parent node not set in node:%s(%s), graph:%s",
func_node->GetName().c_str(), func_node->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Parent functional node not set: %s.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][Node] Parent functional node not set: %s.", graph->GetName().c_str());
return false;
}

@@ -90,7 +90,7 @@ bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::set<Node
if (owner_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "ComputeGraph owner not set in node:%s(%s), graph:%s",
func_node->GetName().c_str(), func_node->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "ComputeGraph owner not set: %s.", func_node->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][Graph] ComputeGraph owner not set: %s.", func_node->GetName().c_str());
return false;
}



+11 -11  ge/graph/build/logical_stream_allocator.cc

@@ -322,7 +322,7 @@ Status SingleStreamPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &s
if (!stream_label.empty()) {
REPORT_INNER_ERROR("E19999", "Stream labels are not supported in SingleStream mode "
"(subgraph: %s, stream label: %s)", subgraph->name.c_str(), stream_label.c_str());
GELOGE(INTERNAL_ERROR, "Stream labels are not supported (subgraph: %s, stream label: %s).",
GELOGE(INTERNAL_ERROR, "[Get][Label] Stream labels are not supported (subgraph: %s, stream label: %s).",
subgraph->name.c_str(), stream_label.c_str());
return INTERNAL_ERROR;
}
@@ -341,8 +341,8 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr
if (!IsEngineSkip(*subgraph) && !HasAssignedStream(*subgraph)) {
REPORT_INNER_ERROR("E19999", "Subgraph %s has not yet been assigned a stream (engine: %s)",
subgraph->name.c_str(), engine_name.c_str());
GELOGE(INTERNAL_ERROR, "Subgraph %s has not yet been assigned a stream (engine: %s).", subgraph->name.c_str(),
engine_name.c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] Subgraph %s has not yet been assigned a stream (engine: %s).",
subgraph->name.c_str(), engine_name.c_str());
return INTERNAL_ERROR;
} else {
GELOGI("[Assign][StreamId] %ld for Subgraph %s (engine: %s).", subgraph->stream_id, subgraph->name.c_str(),
@@ -402,7 +402,7 @@ Status UpdateForParallelGroupPass::Run(ComputeGraphPtr graph, const vector<Subgr
for (const auto &op_desc : itr.second) {
std::string group_name;
if (!AttrUtils::GetStr(op_desc, ATTR_NAME_PARALLEL_GROUP, group_name)) {
GELOGE(FAILED, "[GetAttr][OpDesc]Get node %s ATTR_NAME_PARALLEL_GROUP failed.", op_desc->GetName().c_str());
GELOGE(FAILED, "[Get][Attr] ATTR_NAME_PARALLEL_GROUP of node %s failed.", op_desc->GetName().c_str());
REPORT_INNER_ERROR("E19999", "Get node %s ATTR_NAME_PARALLEL_GROUP failed.", op_desc->GetName().c_str());
return FAILED;
}
@@ -606,7 +606,7 @@ Status LogicalStreamAllocator::Assign(const ComputeGraphPtr &root_graph, const G

Status status = DoAssign(root_graph, subgraph_map, engine_confs);
if (status != SUCCESS) {
GELOGE(status, "Assign streams failed.");
GELOGE(status, "[Assign][Streams] failed, graph:%s.", root_graph->GetName().c_str());
return status;
}

@@ -614,7 +614,7 @@ Status LogicalStreamAllocator::Assign(const ComputeGraphPtr &root_graph, const G
for (const ComputeGraphPtr &subgraph : subgraphs) {
Status status = DoAssign(subgraph, subgraph_map, engine_confs);
if (status != SUCCESS) {
GELOGE(status, "Assign streams failed.");
GELOGE(status, "[Assign][Streams] failed, graph:%s.", subgraph->GetName().c_str());
return status;
}
}
@@ -642,7 +642,7 @@ Status LogicalStreamAllocator::DoAssign(const ComputeGraphPtr &graph, const Grap
if (iter == subgraph_map.end()) {
REPORT_INNER_ERROR("E19999", "Graph %s not found in subgraph_map when do logical stream assign ",
graph->GetName().c_str());
GELOGE(FAILED, "Graph %s not found.", graph->GetName().c_str());
GELOGE(FAILED, "[Check][Param] Graph %s not found.", graph->GetName().c_str());
return FAILED;
}

@@ -652,7 +652,7 @@ Status LogicalStreamAllocator::DoAssign(const ComputeGraphPtr &graph, const Grap
Status status = ConvertSubgraphs(subgraph_info_list, engine_confs, subgraphs);
GE_TIMESTAMP_END(ConvertSubgraphs, "GraphBuilder::AssignStreamConvertSubgraphs");
if (status != SUCCESS) {
GELOGE(status, "Create subgraphs failed.");
GELOGE(status, "[Convert][SubGraphs] failed.");
return status;
}

@@ -683,8 +683,8 @@ Status LogicalStreamAllocator::ConvertSubgraphs(const vector<SubGraphInfoPtr> &s
if ((engine_conf_iter == engine_confs.end()) || (engine_conf_iter->second == nullptr)) {
REPORT_INNER_ERROR("E19999", "Engine conf of subgraph %s not found (engine name: %s)",
subgraph_name.c_str(), engine_name.c_str());
GELOGE(INTERNAL_ERROR, "Engine conf of subgraph %s not found (engine name: %s).", subgraph_name.c_str(),
engine_name.c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] Engine conf of subgraph %s not found (engine name: %s).",
subgraph_name.c_str(), engine_name.c_str());

return INTERNAL_ERROR;
}
@@ -731,7 +731,7 @@ Status LogicalStreamAllocator::RunPasses(const ComputeGraphPtr &graph, const vec
GELOGD("[Show][Status]Stream pass %s return NOT_CHANGED.", pass->GetName().c_str());
} else {
REPORT_CALL_ERROR("E19999", "Stream pass %s run failed.", pass->GetName().c_str());
GELOGE(status, "Stream pass %s failed.", pass->GetName().c_str());
GELOGE(status, "[Call][Run] Stream pass %s failed.", pass->GetName().c_str());
return status;
}
}
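
The RunPasses hunk above distinguishes three outcomes per stream pass: SUCCESS, NOT_CHANGED (accepted, nothing to do), and anything else, which aborts the whole run. A simplified sketch of that control flow, with invented PassStatus / StreamPass types:

// Sketch of the pass-runner control flow shown above; the types here are
// simplified stand-ins, not the GE pass interfaces.
#include <cstdio>
#include <vector>

enum class PassStatus { kSuccess, kNotChanged, kFailed };

struct StreamPass {
  const char *name;
  PassStatus (*run)();
};

bool RunPasses(const std::vector<StreamPass> &passes) {
  for (const auto &pass : passes) {
    PassStatus status = pass.run();
    if (status == PassStatus::kSuccess) {
      std::printf("Stream pass %s changed the graph.\n", pass.name);
    } else if (status == PassStatus::kNotChanged) {
      std::printf("Stream pass %s returned NOT_CHANGED.\n", pass.name);
    } else {
      std::fprintf(stderr, "[Call][Run] Stream pass %s failed.\n", pass.name);
      return false;  // abort on the first failing pass
    }
  }
  return true;
}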


+120 -60  ge/graph/build/memory/block_mem_assigner.cc

@@ -239,6 +239,10 @@ bool MemoryBlock::IsSameBatchLabel() {
return all_same_label;
}

bool MemoryBlock::CanReuse(int32_t thread_scope_id) const {
return (thread_scope_id_.find(thread_scope_id) == thread_scope_id_.end());
}
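
The new CanReuse check, together with the loop added to AddLifeReuseBlock below, enforces one rule: a memory block may only be reused by a node whose thread scope id has not already touched the block. A compact sketch of just that rule, with an invented BlockStub in place of MemoryBlock:

// Sketch of the thread-scope reuse rule introduced above; BlockStub is an
// illustrative stand-in, not the real MemoryBlock class.
#include <cstdint>
#include <set>

struct BlockStub {
  std::set<int32_t> thread_scope_ids;  // scope ids of nodes already assigned to this block
  bool CanReuse(int32_t scope_id) const {
    // reuse is allowed only when this scope id has never touched the block
    return thread_scope_ids.find(scope_id) == thread_scope_ids.end();
  }
};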

bool CanNotLifeReuse(MemoryBlock *block) {
if ((block == nullptr) || !block->reuse_mem_ || block->deleted_block_) {
return true;
@@ -283,6 +287,14 @@ void MemoryBlock::AddLifeReuseBlock(MemoryBlock *block, DependStreamLife &total_
if (CanNotLifeReuse(this) || CanNotLifeReuse(block) || (batch_label_ != block->batch_label_)) {
return;
}

// blocks from different thread scope ids can reuse each other
for (auto thread_scope_id : ThreadScopeId()) {
if (!block->CanReuse(thread_scope_id)) {
return;
}
}

if (block->continuous_block_) {
AddContinuousLifeReuseBlock(block, total_node_depend_stream_life);
return;
@@ -431,7 +443,7 @@ void SetLastUsedInputMemAttr(NodePtr &node, int input_index) {
auto node_op_desc = node->GetOpDesc();
if (node_op_desc != nullptr) {
auto input_desc = node_op_desc->MutableInputDesc(input_index);
if (!ge::AttrUtils::SetInt(*input_desc, ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, true)) {
if (!ge::AttrUtils::SetBool(*input_desc, ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, true)) {
GELOGW("Set %s input[%d] ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE to true failed.", node_op_desc->GetName().c_str(),
input_index);
return;
@@ -488,6 +500,7 @@ string MemoryBlock::String() {
ss << "Block size: " << Size() << " from " << HeadOffset() << " to " << TailOffset() << " ";
ss << "real_size_list: " << ToString(real_size_list_) << " ";
ss << "ref_count: " << ref_count_ << " ";
ss << "reuse_mem_: " << reuse_mem_ << " ";
ss << "members: ";
for (auto x : NodeTypeIndexList()) {
ss << "__node: " << ToString(x) << " ";
@@ -501,8 +514,8 @@ string MemoryBlock::String() {

BlockMemAssigner::BlockMemAssigner(ComputeGraphPtr compute_graph, const map<string, string> &anchor_to_symbol,
const map<string, list<NodeIndexIO>> &symbol_to_anchors)
: mem_offset_(0), p2p_mem_offset_(0), compute_graph_(std::move(compute_graph)),
symbol_to_anchors_(symbol_to_anchors), anchor_to_symbol_(anchor_to_symbol), life_time_(0) {}
: compute_graph_(std::move(compute_graph)), symbol_to_anchors_(symbol_to_anchors),
anchor_to_symbol_(anchor_to_symbol), life_time_(0) {}

BlockMemAssigner::~BlockMemAssigner() {
GELOGD("[Destruct][BlockMemAssigner]blocks_store_ size : %lu", blocks_store_.size());
@@ -659,7 +672,12 @@ bool IsDirectOutputNode(const NodePtr &node, int idx) {
return false;
}

bool CanReuseBlock(size_t continuous_life_begin, const MemoryBlock &reusable_block, size_t block_size) {
bool CanReuseBlock(int32_t thread_scope_id, size_t continuous_life_begin, const MemoryBlock &reusable_block,
size_t block_size) {
if (!reusable_block.CanReuse(thread_scope_id)) {
return false;
}

bool can_reuse = false;
if (reusable_block.Size() == block_size) {
// in some continuous input case, continuous first input node's is not same as topo first node.
@@ -781,7 +799,8 @@ bool IsContinuousInputNodeMaxLife(const NodePtr &n, uint32_t out_index) {
}
auto peer_in_node_desc = peer_in_anchor->GetOwnerNode()->GetOpDesc();
GE_IF_BOOL_EXEC(peer_in_node_desc == nullptr,
GELOGE(FAILED, "Node[%s] output[%u] peer in node desc is null.", n->GetName().c_str(), out_index);
GELOGE(FAILED, "[Get][OpDesc] Node[%s] output[%u] peer in node desc is null.",
n->GetName().c_str(), out_index);
return false;);

if(peer_in_node_desc->GetId() > max_node_life_time) {
@@ -1057,7 +1076,8 @@ void BlockMemAssigner::UpdateOpTensorMemType(std::list<NodeIndexIO> node_index_i

bool BlockMemAssigner::IsContinuousOutput(const NodePtr &n) {
if (n == nullptr) {
GELOGE(FAILED, "Node is null.");
REPORT_INNER_ERROR("E19999", "param n is nullptr, check invalid.");
GELOGE(FAILED, "[Check][Param] Node is null.");
return false;
}

@@ -1065,7 +1085,8 @@ bool BlockMemAssigner::IsContinuousOutput(const NodePtr &n) {
bool is_output_continuous = false;
auto node_desc = n->GetOpDesc();
if (node_desc == nullptr) {
GELOGE(FAILED, "Node[%s] nodedesc is null.", n->GetName().c_str());
REPORT_INNER_ERROR("E19999", "param node:%s opdesc is nullptr, check invalid.", n->GetName().c_str());
GELOGE(FAILED, "[Get][OpDesc] Node[%s] nodedesc is null.", n->GetName().c_str());
return false;
}

@@ -1103,7 +1124,7 @@ bool BlockMemAssigner::IsZeroCopyBlock(const NodePtr &node, bool continuous) {
MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size, size_t no_align_size,
OpMemoryType mem_type, const NodePtr &n, uint32_t out_index,
const vector<bool> &workspace_reuse_flag, const bool is_op_reuse_mem,
const bool continuous, int64_t memory_type) {
const bool continuous, uint64_t memory_type) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
n == nullptr,
REPORT_INNER_ERROR("E19999", "Input parameter n(type:node_ptr) is null, apply memory failed");
@@ -1122,6 +1143,8 @@ MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size,
}

bool is_reuse_memory = false;
int32_t thread_scope_id = kInvalidThreadScopeId;
(void)ge::AttrUtils::GetInt(node_op_desc, ATTR_NAME_THREAD_SCOPE_ID, thread_scope_id);
if (ge_disable_reuse_mem_env_ != "1") {
bool reuse_mem_flag = (mem_type == kOutput) ? IsPreReuse(n, out_index) :
!((workspace_reuse_flag.size() > out_index) && !workspace_reuse_flag[out_index]);
@@ -1141,8 +1164,8 @@ MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size,
GE_IF_BOOL_EXEC(reusable_block->batch_label_ != batch_label, continue);

// A node can reuse blocks of the same stream and preorder streams
if (CanReuseBlock(continuous_life_begin_, *reusable_block, block_size)) {
reusable_block->AddNodeTypeIndex({n, mem_type, out_index, false, continuous_life_begin_},
if (CanReuseBlock(thread_scope_id, continuous_life_begin_, *reusable_block, block_size)) {
reusable_block->AddNodeTypeIndex({n, mem_type, out_index, false, continuous_life_begin_, thread_scope_id},
real_size, no_align_size);
if (mem_type == kOutput) {
auto iter = anchor_to_symbol_.find(NodeIndexIO(n, out_index, kOut).ToString());
@@ -1168,7 +1191,8 @@ MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size,

// Data and netoutput need zero copy block
block->is_zero_copy_ = IsZeroCopyBlock(n, continuous);
block->AddNodeTypeIndex({n, mem_type, out_index, false, continuous_life_begin_}, real_size, no_align_size);
block->AddNodeTypeIndex({n, mem_type, out_index, false, continuous_life_begin_, thread_scope_id},
real_size, no_align_size);
block->stream_id_ = node_op_desc->GetStreamId();
block->continuous_block_ = continuous;
block->batch_label_ = batch_label;
@@ -1430,7 +1454,9 @@ MemoryBlock *BlockMemAssigner::ApplyOutMemory(const NodePtr &n, uint32_t index,
auto op_desc = owner_node->GetOpDesc();
GE_IF_BOOL_EXEC(op_desc == nullptr, continue);
Params *instance = Params::Instance();
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(instance == nullptr, return nullptr, "Params instance is nullptr.");
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(instance == nullptr,
REPORT_INNER_ERROR("E19999", "Params instance is nullptr.");
return nullptr, "[Get][Instance] Params instance is nullptr.");
if (!((instance->GetTarget() == TARGET_TYPE_TINY) && (op_desc->GetType() == NETOUTPUT))) {
out_count++;
}
@@ -1442,7 +1468,9 @@ MemoryBlock *BlockMemAssigner::ApplyOutMemory(const NodePtr &n, uint32_t index,

bool IsOutputBlock(const ge::InDataAnchorPtr &in_data_anchor) {
auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, GELOGE(FAILED, "Peer out anchor is nullptr."); return false);
GE_IF_BOOL_EXEC(peer_out_anchor == nullptr,
REPORT_INNER_ERROR("E19999", "Peer out anchor is nullptr.");
GELOGE(FAILED, "[Check][Param] Peer out anchor is nullptr."); return false);
auto src = peer_out_anchor->GetOwnerNode();
int32_t index = peer_out_anchor->GetIdx();
auto iter = GetLocalOmgContext().out_nodes_map.find(src->GetName());
@@ -1491,10 +1519,13 @@ bool IsKnownSubgraphData(const NodePtr &node) {

void BlockMemAssigner::ReleaseMemory(MemoryBlock *to_release, vector<MemoryBlock *> &reusable_memory,
bool same_stream) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(to_release == nullptr, return, "Input parameter to_release is null.");
GE_CHK_TRUE_EXEC_INFO(to_release->ref_count_ <= 0, return, "Release memory");
GE_CHK_TRUE_EXEC_INFO(!to_release->reuse_mem_, return, "doesn't reuse memory");
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(to_release == nullptr,
return, "[Check][Param] Input parameter to_release is null.");
GE_CHK_TRUE_EXEC_INFO(to_release->ref_count_ <= 0,
return, "[Check][Param] to_release->ref_count_ must greater than 0");
GE_CHK_TRUE_EXEC_INFO(!to_release->reuse_mem_, return, "[Check][Param] doesn't reuse memory");
--to_release->ref_count_;
GE_CHK_TRUE_EXEC_INFO(!to_release->reuse_mem_, return, "doesn't reuse memory");
if (!same_stream) {
to_release->same_stream_ = false;
}
@@ -1593,13 +1624,13 @@ void CheckAndGetOpReuseEnv(const string &env, vector<string> &env_vec, bool &op_
string env_str;
env_str = string(env);
if (env_str.size() > kReuseMaxCharNum) {
GELOGE(FAILED, "The OP_NO_REUSE_MEM has more than %d characters.", kReuseMaxCharNum);
GELOGE(FAILED, "[Check][Param] The OP_NO_REUSE_MEM has more than %d characters.", kReuseMaxCharNum);
return;
}

SplitStringByComma(env_str, env_vec);
if (env_vec.size() > kReuseMaxOpNum) {
GELOGE(FAILED, "The OP_NO_REUSE_MEM has more than %d nodes.", kReuseMaxOpNum);
GELOGE(FAILED, "[Check][Param] The OP_NO_REUSE_MEM has more than %d nodes.", kReuseMaxOpNum);
return;
}

@@ -1794,8 +1825,8 @@ void BlockMemAssigner::AssignMemoryWithReuse(vector<int64_t> &ranges) {
zero_memory_list_.emplace_back(n, kWorkspace, static_cast<uint32_t>(i), false);
continue;
}
int64_t memory_type = RT_MEMORY_HBM;
if (!GetWorkSpaceMemoryType(n, i, memory_type)) {
uint64_t memory_type = RT_MEMORY_HBM;
if (!GetWorkSpaceMemoryType(n, i, memory_type, workspace_reuse_flag)) {
GELOGW("Get workspace memory type failed.");
return;
}
@@ -1830,7 +1861,7 @@ void BlockMemAssigner::AssignMemoryWithReuse(vector<int64_t> &ranges) {
}

void BlockMemAssigner::CheckWorkspaceReuse(const vector<bool> &workspace_reuse_flag, uint32_t index, int64_t stream_id,
MemoryBlock *mem_block, int64_t memory_type) {
MemoryBlock *mem_block, uint64_t memory_type) {
bool reuse_mem_flag =
((workspace_reuse_flag.size() > index) && (workspace_reuse_flag[index] == false)) ? false : true;
if (reuse_mem_flag) {
@@ -1840,7 +1871,9 @@ void BlockMemAssigner::CheckWorkspaceReuse(const vector<bool> &workspace_reuse_f

void BlockMemAssigner::GetNodeWorkSpaceSize(const NodePtr &node, vector<int64_t> &workspace_memory,
int64_t &total_size) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(node->GetOpDesc() == nullptr, return, "Op desc is null.");
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(node->GetOpDesc() == nullptr,
REPORT_INNER_ERROR("E19999", "param node opdesc is nullptr, check invalid.");
return, "[Check][Param] Op desc is null.");
vector<int64_t> workspace_byte_nums = node->GetOpDesc()->GetWorkspaceBytes();

GELOGD("node[%s] size:%zu", node->GetOpDesc()->GetName().c_str(), workspace_byte_nums.size());
@@ -1960,24 +1993,29 @@ void BlockMemAssigner::ReuseBlocksByLifeTime(size_t range_size) {
}
}

void AddBlockMemOffset(size_t &mem_offset, size_t &p2p_mem_offset, MemoryBlock &block) {
if (block.memory_type_ == RT_MEMORY_HBM) {
if (block.first_continuous_block_) {
mem_offset += MEM_ALIGN_SIZE;
}
block.Resize();
block.SetHeadOffset(mem_offset);
mem_offset += block.Size();
block.SetTailOffset(mem_offset - 1);
} else if (block.memory_type_ == RT_MEMORY_P2P_DDR) {
if (block.first_continuous_block_) {
p2p_mem_offset += MEM_ALIGN_SIZE;
void AddBlockMemOffset(std::map<uint64_t, size_t> &mem_offsets, MemoryBlock &block) {
auto it = mem_offsets.find(block.memory_type_);
if (it == mem_offsets.end()) {
auto result = mem_offsets.insert(std::pair<uint64_t, size_t>(block.memory_type_, 0));
// Insert failure is unlikely
if (!result.second) {
return;
}
block.Resize();
block.SetHeadOffset(p2p_mem_offset);
p2p_mem_offset += block.Size();
block.SetTailOffset(p2p_mem_offset - 1);
it = result.first;
}

if (it == mem_offsets.end()) {
return;
}

auto &mem_offset = it->second;
if (block.first_continuous_block_) {
mem_offset += MEM_ALIGN_SIZE;
}
block.Resize();
block.SetHeadOffset(mem_offset);
mem_offset += block.Size();
block.SetTailOffset(mem_offset - 1);
}
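
The reworked AddBlockMemOffset above keeps one offset counter per memory type in a single map instead of dedicated HBM and P2P counters. The following standalone sketch illustrates only that accumulation pattern; SimpleBlock, kAlign and the sample type values are hypothetical stand-ins for MemoryBlock, MEM_ALIGN_SIZE and the runtime memory-type constants, and the sketch omits the real Resize() step.

#include <cstdint>
#include <cstdio>
#include <map>

namespace {
constexpr size_t kAlign = 512;  // stand-in for MEM_ALIGN_SIZE; the real value comes from GE headers

struct SimpleBlock {  // hypothetical stand-in for MemoryBlock
  uint64_t memory_type;
  size_t size;
  bool first_continuous;
  size_t head_offset;
  size_t tail_offset;
};

// Accumulate one block into the offset counter of its memory type,
// creating the counter on first use (mirrors the map-based logic above).
void AddOffset(std::map<uint64_t, size_t> &offsets, SimpleBlock &block) {
  auto &offset = offsets[block.memory_type];  // value-initialized to 0 on first access
  if (block.first_continuous) {
    offset += kAlign;
  }
  block.head_offset = offset;
  offset += block.size;
  block.tail_offset = offset - 1;
}
}  // namespace

int main() {
  std::map<uint64_t, size_t> offsets;
  SimpleBlock hbm{0x2, 1024, false, 0, 0};   // 0x2 is a placeholder type value, not RT_MEMORY_HBM
  SimpleBlock p2p{0x20, 4096, true, 0, 0};   // 0x20 is a placeholder type value, not RT_MEMORY_P2P_DDR
  AddOffset(offsets, hbm);
  AddOffset(offsets, p2p);
  for (const auto &it : offsets) {
    std::printf("type:%llu total:%zu\n", static_cast<unsigned long long>(it.first), it.second);
  }
  return 0;
}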

bool DynamicBatchBlockReuse(MemoryBlock &block) {
@@ -2004,27 +2042,27 @@ void BlockMemAssigner::ResizeDynamicBatchBlocks() {
}
}

size_t max_mem_offset = mem_offset_;
size_t max_p2p_mem_offset = p2p_mem_offset_;
std::map<uint64_t, size_t> max_mem_offsets = mem_offsets_;
for (auto &batch_blocks : dynamic_batch_blocks) {
size_t mem_offset = mem_offset_;
size_t p2p_mem_offset = p2p_mem_offset_;
std::map<uint64_t, size_t> mem_offsets = mem_offsets_;
for (auto block : batch_blocks.second) {
if (block == nullptr || block->deleted_block_ || block->is_zero_copy_) {
continue;
}
AddBlockMemOffset(mem_offset, p2p_mem_offset, *block);
}
if (mem_offset > max_mem_offset) {
max_mem_offset = mem_offset;
AddBlockMemOffset(mem_offsets, *block);
}
if (p2p_mem_offset > max_p2p_mem_offset) {
max_p2p_mem_offset = p2p_mem_offset;

for (auto &it : mem_offsets) {
auto itmax = max_mem_offsets.find(it.first);
if (itmax == max_mem_offsets.end()) {
max_mem_offsets[it.first] = it.second;
} else if (it.second > itmax->second) {
itmax->second = it.second;
}
GELOGI("Batch:%s memory type:%ld offset:%zu", batch_blocks.first.c_str(), it.first, it.second);
}
GELOGI("Batch[%s] offset[%zu] p2p_offset[%zu]", batch_blocks.first.c_str(), mem_offset, p2p_mem_offset);
}
mem_offset_ = max_mem_offset;
p2p_mem_offset_ = max_p2p_mem_offset;
mem_offsets_ = max_mem_offsets;
}

///
@@ -2042,11 +2080,13 @@ void BlockMemAssigner::ResizeMemoryBlocks() {
continue;
}

AddBlockMemOffset(mem_offset_, p2p_mem_offset_, *memory_block);
AddBlockMemOffset(mem_offsets_, *memory_block);
}
ResizeDynamicBatchBlocks();
GELOGI("mem_offset_ exclude zero_copy_memory is %zu, p2p_mem_offset_ exclude zero_copy_memory is %zu,"
"theory_min_memory_size %zu", mem_offset_, p2p_mem_offset_, theory_min_memory_size_);
for (auto it : mem_offsets_) {
GELOGI("Memory type:%ld mem_offset exclude zero_copy_memory:%zu, theory_min_memory_size:%zu", it.first, it.second,
theory_min_memory_size_);
}
}

///
@@ -2062,7 +2102,13 @@ void SetOffsetSize(const NodeTypeIndex &node_type, const MemoryBlock *block,
size_t real_size, size_t no_align_size, int32_t child_block_level) {
ge::OpDescPtr op_desc = node_type.node->GetOpDesc();
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(op_desc == nullptr, return, "op_desc is null.");
string graph_name = node_type.node->GetOwnerComputeGraph()->GetName();
auto owner_graph = node_type.node->GetOwnerComputeGraph();
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(owner_graph == nullptr, return, "owner_graph is null.");
string graph_name = owner_graph->GetName();
if (owner_graph->GetParentGraph() != nullptr) {
graph_name = owner_graph->GetParentGraph()->GetName();
}

vector<int64_t> memorys_type;
int64_t offset = block->HeadOffset();
size_t end = node_type.life_time_end;
@@ -2108,12 +2154,12 @@ void SetOffsetSize(const NodeTypeIndex &node_type, const MemoryBlock *block,
op_desc->SetWorkspace(workspace_list);
}
GELOGI("[IMAS]Set %s name[%s] optype[%s] %s[%u] offset to [%ld] streamid[%ld] memtype[%ld] size[%zu] realsize[%zu] "
"noalignsize[%zu] life time begin[%s] life time end[%zu] child[%d:%d:%d:%d:%d] isref[%d] batch[%s]",
"noalignsize[%zu] life time begin[%s] life time end[%zu] child[%d:%d:%d:%d:%d] isref[%d] batch[%s] scope[%d]",
graph_name.c_str(), op_desc->GetName().c_str(), node_type.node->GetType().c_str(),
node_type.GetMemType().c_str(), node_type.index, offset, op_desc->GetStreamId(), block->memory_type_,
block->Size(), real_size, no_align_size, node_type.GetLifeBeginDesc().c_str(), end, child_block_level,
block->reuse_mem_, block->continuous_block_, block->is_zero_copy_, block->same_stream_, node_type.ref_input,
block->batch_label_.c_str());
block->batch_label_.c_str(), node_type.thread_scope_id);
}

void SetBlockOpMemOffset(MemoryBlock *block, int32_t child_block_level) {
@@ -2176,11 +2222,11 @@ Status BlockMemAssigner::Assign() {

bool BlockMemAssigner::CheckIsZeroMemNodeType(const string &node_type) const {
return (node_type == VARIABLE) || (node_type == CONSTANT) || (node_type == MULTISHAPE) ||
(node_type == CONSTANTOP) || (node_type == ASSIGNADD) || (node_type == ASSIGNSUB) ||
(node_type == ASSIGN) || (node_type == HVDWAIT);
(node_type == CONSTANTOP) || (node_type == HVDWAIT);
}

bool BlockMemAssigner::GetWorkSpaceMemoryType(const NodePtr &node, size_t index, int64_t &memory_type) {
bool BlockMemAssigner::GetWorkSpaceMemoryType(const NodePtr &node, size_t index, uint64_t &memory_type,
vector<bool> &workspace_reuse_flag) {
memory_type = RT_MEMORY_HBM;
vector<int64_t> workspace_memory_type;
auto op_desc = node->GetOpDesc();
@@ -2196,6 +2242,20 @@ bool BlockMemAssigner::GetWorkSpaceMemoryType(const NodePtr &node, size_t index,
return false;
}
memory_type = has_workspace_mem_type_attr ? workspace_memory_type[index] : RT_MEMORY_HBM;

vector<int32_t> workspace_no_reuse_scope;
bool has_workspace_no_reuse_scope =
ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE, workspace_no_reuse_scope);
if (has_workspace_no_reuse_scope && (index < workspace_no_reuse_scope.size())
&& (workspace_no_reuse_scope[index] == kSessionNoReuse)) {
memory_type |= kSessionScopeMemory;
if (workspace_reuse_flag.empty()) {
workspace_reuse_flag.assign(workspace_no_reuse_scope.size(), true);
}
// set to no reuse
workspace_reuse_flag[index] = false;
GELOGI("%s's workspace is session scope no reuse, memory type:%lu.", node->GetName().c_str(), memory_type);
}
return true;
}
} // namespace ge
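
GetWorkSpaceMemoryType above now widens memory_type to uint64_t so that the session-scope bit (kSessionScopeMemory in the header change shown next) can be OR-ed into the base runtime type when a workspace is marked session-no-reuse. Below is a minimal sketch of that encoding; the base type value 0x2 is a placeholder rather than the real RT_MEMORY_HBM, and the mask-based recovery is an assumption about how consumers of the value use kMemoryTypeMask, whose use sites are not part of this hunk.

#include <cstdint>
#include <cstdio>

// Constants mirrored from block_mem_assigner.h in this change.
constexpr uint64_t kSessionScopeMemory = 0x100000000;
constexpr uint64_t kMemoryTypeMask = 0xffffffff;

int main() {
  const uint64_t base_type = 0x2;  // placeholder for a runtime memory type such as RT_MEMORY_HBM
  // GetWorkSpaceMemoryType ORs the session-scope bit in when the workspace is session-no-reuse.
  const uint64_t memory_type = base_type | kSessionScopeMemory;
  // A consumer can recover the base type with the mask and test the scope bit independently.
  const uint64_t recovered_base = memory_type & kMemoryTypeMask;
  const bool is_session_scope = (memory_type & kSessionScopeMemory) != 0;
  std::printf("type:0x%llx base:0x%llx session_scope:%d\n",
              static_cast<unsigned long long>(memory_type),
              static_cast<unsigned long long>(recovered_base),
              static_cast<int>(is_session_scope));
  return 0;
}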

+ 23
- 13
ge/graph/build/memory/block_mem_assigner.h View File

@@ -33,14 +33,21 @@

namespace ge {
const size_t kMaxLifeTime = 0xffffffff;
const int32_t kInvalidThreadScopeId = -1;
const uint64_t kSessionScopeMemory = 0x100000000;
const uint64_t kMemoryTypeMask = 0xffffffff;

enum MemoryNoReuseScope { kReuse, kSessionNoReuse, kGraphNoReuse };

using DependStreamLife = std::map<int64_t, std::map<int64_t, size_t>>;

enum OpMemoryType { kOutput, kWorkspace };

struct NodeTypeIndex {
NodeTypeIndex(ge::NodePtr node, OpMemoryType mem_type, uint32_t index, bool ref_input = false, size_t begin = 0)
: node(std::move(node)), mem_type(mem_type), index(index), ref_input(ref_input), life_time_begin(begin) {}
NodeTypeIndex(ge::NodePtr node, OpMemoryType mem_type, uint32_t index, bool ref_input = false, size_t begin = 0,
int32_t thread_scope_id = kInvalidThreadScopeId)
: node(std::move(node)), mem_type(mem_type), index(index), ref_input(ref_input), life_time_begin(begin),
thread_scope_id(thread_scope_id) {}

ge::NodePtr node = nullptr;
OpMemoryType mem_type = kOutput;
@@ -48,6 +55,7 @@ struct NodeTypeIndex {
bool ref_input = false;
size_t life_time_begin = 0;
size_t life_time_end = kMaxLifeTime;
int32_t thread_scope_id = kInvalidThreadScopeId;
const string GetMemType() const {
if (mem_type == kOutput) {
return "output";
@@ -143,6 +151,9 @@ class MemoryBlock {
same_stream_ = false;
}
}
if (node_type_index.thread_scope_id != kInvalidThreadScopeId) {
thread_scope_id_.insert(node_type_index.thread_scope_id);
}
}

void AddSymbol(const std::string &symbol) {
@@ -154,6 +165,7 @@ class MemoryBlock {
const std::vector<size_t> &RealSizeList() const { return real_size_list_; }
const std::vector<MemoryBlock *> &ChildBlockList() const { return child_blocks_; }
const std::vector<size_t> &NoAlignSizeList() const { return no_align_size_list_; }
const std::set<int32_t> &ThreadScopeId() const { return thread_scope_id_; }

void Resize();

@@ -175,6 +187,8 @@ class MemoryBlock {

size_t GetDependLifeBegin(int64_t stream_id, DependStreamLife &node_depend_stream_life);

bool CanReuse(int32_t thread_scope_id) const;

int ref_count_;
int64_t stream_id_;
bool deleted_block_;
@@ -198,6 +212,7 @@ class MemoryBlock {
std::vector<NodeTypeIndex> node_type_index_list_;
std::vector<std::string> symbol_list_;
std::vector<MemoryBlock *> child_blocks_;
std::set<int32_t> thread_scope_id_;
};

class BlockMemAssigner : public MemAssigner {
@@ -213,9 +228,7 @@ class BlockMemAssigner : public MemAssigner {

Status Assign() override;

size_t GetMemOffset() const { return mem_offset_; }

size_t GetP2PMemOffset() const { return p2p_mem_offset_; }
const std::map<uint64_t, size_t> &GetMemOffsets() const { return mem_offsets_; }

int64_t GetAtomicAddrCleanId() const { return atomic_addr_clean_id_; }

@@ -318,14 +331,10 @@ class BlockMemAssigner : public MemAssigner {
///
void UpdateOpTensorMemType(std::list<NodeIndexIO> node_index_io_list, int64_t memory_type);

size_t mem_offset_;
size_t p2p_mem_offset_;

std::map<uint64_t, size_t> mem_offsets_;
ge::ComputeGraphPtr compute_graph_;

std::vector<MemoryBlock *> memory_blocks_;
std::vector<MemoryBlock *> blocks_store_;

std::vector<NodeTypeIndex> zero_memory_list_;

// ref mapping
@@ -369,7 +378,7 @@ class BlockMemAssigner : public MemAssigner {
///
MemoryBlock *ApplyMemory(size_t block_size, size_t real_size, size_t no_align_size, OpMemoryType mem_type,
const ge::NodePtr &n, uint32_t out_index, const std::vector<bool> &workspace_reuse_flag,
const bool is_op_reuse_mem, const bool continuous, int64_t memory_type);
const bool is_op_reuse_mem, const bool continuous, uint64_t memory_type);

///
/// @ingroup GE
@@ -383,7 +392,7 @@ class BlockMemAssigner : public MemAssigner {
/// @author
///
void CheckWorkspaceReuse(const vector<bool> &workspace_reuse_flag, uint32_t index, int64_t stream_id,
MemoryBlock *mem_block, int64_t memory_type);
MemoryBlock *mem_block, uint64_t memory_type);

///
/// @ingroup GE
@@ -446,7 +455,8 @@ class BlockMemAssigner : public MemAssigner {

bool IsContinuousOutput(const NodePtr &n);

bool GetWorkSpaceMemoryType(const NodePtr &node, size_t index, int64_t &memory_type);
bool GetWorkSpaceMemoryType(const NodePtr &node, size_t index, uint64_t &memory_type,
vector<bool> &workspace_reuse_flag);

void ContinuousOutRefCheck(bool &isAllOutputRef, bool &isOutputHasRef, const NodePtr &n);



+ 311
- 10
ge/graph/build/memory/graph_mem_assigner.cc View File

@@ -36,6 +36,9 @@ namespace {
const int kAllInputAddrIsAtomic = -1;
const int kVirtualInputNodeMemoryReuse = 0;
const int kVirtualOutputNodeMemoryReuse = 1;
const int kPrevNextDistanceNum = 2;
const int64_t kInvalidStream = -1;
const char *const kEngineNameGeLocal = "DNN_VM_GE_LOCAL_OP_STORE";
// Each state occupies its own bit; values must not overlap
enum ContinuousType { kTypeInput = 1, kTypeInputNoPadding = 2, kTypeOutput = 4, kTypeOutputNoPadding = 8 };

@@ -104,11 +107,22 @@ Status GraphMemoryAssigner::AssignMemory() {
compute_graph_->GetGraphID(), compute_graph_->GetName().c_str());
return ge::FAILED;
}
MemoryOffset memory_offset(RT_MEMORY_HBM, mem_assigner->GetMemOffset());
memory_offset_.emplace(RT_MEMORY_HBM, memory_offset);

if (mem_assigner->GetP2PMemOffset() >= 0) {
MemoryOffset p2p_memory_offset(RT_MEMORY_P2P_DDR, mem_assigner->GetP2PMemOffset());
for (auto pair : mem_assigner->GetMemOffsets()) {
MemoryOffset offset(pair.first, pair.second);
memory_offset_.emplace(pair.first, offset);
}

// the base memory type offsets must exist
auto it = mem_assigner->GetMemOffsets().find(RT_MEMORY_HBM);
if (it == mem_assigner->GetMemOffsets().end()) {
MemoryOffset memory_offset(RT_MEMORY_HBM, 0);
memory_offset_.emplace(RT_MEMORY_HBM, memory_offset);
}

it = mem_assigner->GetMemOffsets().find(RT_MEMORY_P2P_DDR);
if (it == mem_assigner->GetMemOffsets().end()) {
MemoryOffset p2p_memory_offset(RT_MEMORY_P2P_DDR, 0);
memory_offset_.emplace(RT_MEMORY_P2P_DDR, p2p_memory_offset);
}

@@ -221,7 +235,7 @@ ge::Status CalculateTensorRealSizeAndOutSize(const ge::ConstGeTensorDescPtr &out
return SUCCESS;
}

Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<int64_t, size_t> &mem_type_to_offset) {
Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<uint64_t, size_t> &mem_type_to_offset) {
if (memory_offset_.empty()) {
REPORT_INNER_ERROR("E19999", "InnerData memory_offset_ empty, not expected, graph_id:%u, graph_name:%s",
compute_graph_->GetGraphID(), compute_graph_->GetName().c_str());
@@ -230,9 +244,12 @@ Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<int64_t, size
return ge::FAILED;
}

GE_CHK_STATUS_RET(ReAssignContinuousMemory(is_loop_graph), "ReAssignContinuousMemory Failed!");
GE_CHK_STATUS_RET(ReAssignAtomicMemory(is_loop_graph), "ReAssignAtomicMemory Failed!");
GE_CHK_STATUS_RET(AssignBufferPoolMemory(), "AssignBufferPoolMemory Failed!");
GE_CHK_STATUS_RET(ReAssignContinuousMemory(is_loop_graph),
"[ReAssign][ContinuousMemory] Failed! graph:%s", compute_graph_->GetName().c_str());
GE_CHK_STATUS_RET(ReAssignAtomicMemory(is_loop_graph),
"[ReAssign][AtomicMemory] Failed! graph:%s", compute_graph_->GetName().c_str());
GE_CHK_STATUS_RET(AssignBufferPoolMemory(),
"[Assign][BufferPoolMemory] Failed! graph:%s", compute_graph_->GetName().c_str());

size_t total_mem_offset = 0;
for (auto pair : memory_offset_) {
@@ -258,7 +275,7 @@ Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<int64_t, size
return SUCCESS;
}

Status GraphMemoryAssigner::AssignZeroCopyMemory(map<int64_t, size_t> &mem_offset, size_t &zero_mem_copy_size) {
Status GraphMemoryAssigner::AssignZeroCopyMemory(map<uint64_t, size_t> &mem_offset, size_t &zero_mem_copy_size) {
BlockMemAssignerPtr priority_assigner = std::move(mem_assigner_->GetPriorityAssinger());
if (priority_assigner == nullptr) {
REPORT_INNER_ERROR("E19999", "InnerData priority_assigner nullptr, not expected, graph_id:%u, graph_name:%s",
@@ -1006,7 +1023,9 @@ Status GraphMemoryAssigner::AssignReferenceMemory() {
node->GetName().c_str());

auto out_op_desc = node->GetOpDesc();
GE_IF_BOOL_EXEC(out_op_desc == nullptr, GELOGE(ge::FAILED, "out_op_desc is null."); return ge::FAILED);
GE_IF_BOOL_EXEC(out_op_desc == nullptr,
REPORT_INNER_ERROR("E19999", "out_op_desc is null.");
GELOGE(ge::FAILED, "[Check][Param] out_op_desc is null."); return ge::FAILED);
vector<int64_t> output_list = out_op_desc->GetOutputOffset();

if (out_op_desc->GetOutputsSize() > output_list.size()) {
@@ -1231,6 +1250,7 @@ Status GraphMemoryAssigner::AssignOrdinaryAtomicWorkspaceMemory(const ge::OpDesc
batch_label.c_str());

mem_type_iter->second.mem_offset_ += workspace_size;
AlignMemOffset(MEM_ALIGN_SIZE, RT_MEMORY_HBM);
mem_offset_end.emplace_back(mem_type_iter->second.mem_offset_);
}
}
@@ -1273,6 +1293,7 @@ Status GraphMemoryAssigner::AssignFusionAtomicWorkspaceMemory(const ge::OpDescPt
op_desc->GetStreamId(), RT_MEMORY_HBM, workspace_size, workspace_size, batch_label.c_str());

mem_type_iter->second.mem_offset_ += workspace_size;
AlignMemOffset(MEM_ALIGN_SIZE, RT_MEMORY_HBM);
mem_offset_end.emplace_back(mem_type_iter->second.mem_offset_);
index_offset.insert(std::make_pair(workspace_index, workspace_offset));
}
@@ -1388,6 +1409,9 @@ ge::Status GraphMemoryAssigner::SetInputOffset() {
"graph_id:%u, graph_name:%s", compute_graph_->GetGraphID(), compute_graph_->GetName().c_str());
}
for (auto pair : memory_offset_) {
if ((pair.first != RT_MEMORY_HBM) && (pair.second.mem_offset_ == 0)) {
continue;
}
GEEVENT("[IMAS]AfterAssignMemory : %s memoffset[%zu], memtype[%ld]", compute_graph_->GetName().c_str(),
pair.second.mem_offset_, pair.first);
}
@@ -1944,4 +1968,281 @@ Status GraphMemoryAssigner::AssignBufferPoolMemory() {
compute_graph_->GetName().c_str(), mem_type, buffer_pool_mem_assigner.GetMemOffset());
return SUCCESS;
}

// Returns false if the producer and all its consumers are on the same stream, or if all consumers share one
// stream when the producer has not been assigned a stream.
bool GraphMemoryAssigner::IsOutputVisitedByMultiStream(const NodePtr &peer_out_node, int64_t out_anchor_index) {
GE_IF_BOOL_EXEC(peer_out_node->GetOpDesc() == nullptr, return true);
int64_t unique_stream_id = peer_out_node->GetOpDesc()->GetStreamId();

GE_IF_BOOL_EXEC(peer_out_node->GetOutDataAnchor(out_anchor_index) == nullptr, return true);
for (const auto &in_data_anchor : peer_out_node->GetOutDataAnchor(out_anchor_index)->GetPeerInDataAnchors()) {
auto node = in_data_anchor->GetOwnerNode();
GE_IF_BOOL_EXEC(node == nullptr || node->GetOpDesc() == nullptr, continue);
if (node->GetOpDesc()->GetStreamId() == kInvalidStream) {
continue;
}
if (unique_stream_id == kInvalidStream) { // peer_out_node does not belong to any stream
unique_stream_id = node->GetOpDesc()->GetStreamId();
continue;
}
if (node->GetOpDesc()->GetStreamId() != unique_stream_id) {
return true;
}
}
return false;
}

void GraphMemoryAssigner::UpdatePrevNodeInputDesc(const NodePtr &prev_node,
const vector<int64_t> &prev_node_input_index_vec,
int64_t distance) {
GE_IF_BOOL_EXEC(prev_node == nullptr, return);
auto prev_node_op_desc = prev_node->GetOpDesc();
GE_IF_BOOL_EXEC(prev_node_op_desc == nullptr, return);

for (const auto prev_node_input_index : prev_node_input_index_vec) {
auto input_desc = prev_node_op_desc->GetInputDesc(prev_node_input_index);
vector<int64_t> prev_next_distances;
if (!ge::AttrUtils::GetListInt(input_desc, ATTR_NAME_DATA_VISIT_DISTANCE, prev_next_distances)) {
GELOGW("Get [%s] input [%ld] ATTR_NAME_DATA_VISIT_DISTANCE failed",
prev_node_op_desc->GetName().c_str(),
prev_node_input_index);
continue;
}

if (prev_next_distances.size() == kPrevNextDistanceNum) {
prev_next_distances[1] = distance;
} else {
GELOGW("Size of prev_next_distances is not %d.", kPrevNextDistanceNum);
continue;
}
if (!ge::AttrUtils::SetListInt(input_desc, ATTR_NAME_DATA_VISIT_DISTANCE, prev_next_distances)) {
GELOGW("Set [%s] input [%ld] ATTR_NAME_DATA_VISIT_DISTANCE failed.",
prev_node_op_desc->GetName().c_str(),
prev_node_input_index);
continue;
}

if (prev_node_op_desc->UpdateInputDesc(prev_node_input_index, input_desc) != GRAPH_SUCCESS) {
GELOGW("Update [%s] input [%ld] ATTR_NAME_DATA_VISIT_DISTANCE failed.",
prev_node_op_desc->GetName().c_str(),
prev_node_input_index);
continue;
}
GELOGD("Set the next distance[%ld] to node[%s], input index[%ld]",
distance,
prev_node->GetName().c_str(),
prev_node_input_index);
}
return;
}

void GraphMemoryAssigner::UpdateCurNodeInputDesc(const NodePtr &cur_node,
int64_t cur_node_input_index,
int64_t distance) {
GE_IF_BOOL_EXEC(cur_node == nullptr, return);
GE_IF_BOOL_EXEC(cur_node->GetOpDesc() == nullptr, return);
auto input_desc = cur_node->GetOpDesc()->GetInputDesc(cur_node_input_index);
vector<int64_t> prev_next_distances{distance, -1};

if (!ge::AttrUtils::SetListInt(input_desc, ATTR_NAME_DATA_VISIT_DISTANCE, prev_next_distances)) {
GELOGW("Set [%s] input[%ld] ATTR_NAME_DATA_VISIT_DISTANCE failed.",
cur_node->GetOpDesc()->GetName().c_str(),
cur_node_input_index);
return;
}
if (cur_node->GetOpDesc()->UpdateInputDesc(cur_node_input_index, input_desc) != GRAPH_SUCCESS) {
GELOGW("Update [%s] input[%ld] ATTR_NAME_DATA_VISIT_DISTANCE failed.",
cur_node->GetOpDesc()->GetName().c_str(),
cur_node_input_index);
return;
}
GELOGD("Set the prev distance[%ld] to node[%s], input index[%ld]",
distance,
cur_node->GetName().c_str(),
cur_node_input_index);
return;
}

void GraphMemoryAssigner::CheckNeedCalcDistAndUpdateVisitInfo(
const NodePtr &peer_out_node,
const OutDataAnchorPtr &peer_out_anchor,
size_t matched_mem_offset,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
bool &is_need_calc_distance) {
auto iter = mem_block_visit_info.find(matched_mem_offset);
// No visit info found: peer_out_node must be the producer and this data is being visited for the first time.
if (iter == mem_block_visit_info.end()) {
if (IsOutputVisitedByMultiStream(peer_out_node, peer_out_anchor->GetIdx())) {
vector<int64_t> temp;
mem_block_visit_info.insert(std::make_pair(matched_mem_offset, std::make_pair(nullptr, temp)));
is_need_calc_distance = false;
return;
} else {
vector<int64_t> temp = {-1};
// the producer's prev_node_index defaults to -1
mem_block_visit_info.insert(std::make_pair(matched_mem_offset, std::make_pair(peer_out_node, temp)));
is_need_calc_distance = true;
return;
}
} else {
if (mem_block_visit_info[matched_mem_offset].first == nullptr) {
// multi-stream visit, no need to calculate
is_need_calc_distance = false;
return;
}
if (peer_out_node->GetOpDesc()->GetStreamId() !=
mem_block_visit_info[matched_mem_offset].first->GetOpDesc()->GetStreamId()) {
// cur node and peer_out_node not in the same stream, no need to calculate
is_need_calc_distance = false;
return;
}
}
is_need_calc_distance = true;
return;
}

// calculate distance, update visit info, update prev_node input desc, update cur node input desc
void GraphMemoryAssigner::CalcDistanceAndUpdateDesc(const map<string, int64_t> &node_index_in_stream,
const InDataAnchorPtr &in_data_anchor,
size_t matched_mem_offset,
NodePtr &node,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
bool &is_need_skip) {
int64_t distance = -1;
auto prev_node = mem_block_visit_info[matched_mem_offset].first;
auto prev_node_input_index_vec = mem_block_visit_info[matched_mem_offset].second;
GE_IF_BOOL_EXEC(prev_node == nullptr, is_need_skip = true; return);
if (prev_node_input_index_vec.size() == 1 && prev_node_input_index_vec[0] == -1) {
// prev_node is the producer and the data has just been produced (not yet visited by any other node)
GE_IF_BOOL_EXEC(prev_node->GetOpDesc() == nullptr, is_need_skip = true; return);
if (prev_node->GetOpDesc()->GetStreamId() == -1) { // the producer has not been assigned a stream
distance = 0;
} else {
auto iter = node_index_in_stream.find(prev_node->GetName());
if (iter == node_index_in_stream.end()) {
distance = 0;
} else {
distance = node_index_in_stream.at(node->GetName()) - iter->second - 1;
}
}
mem_block_visit_info[matched_mem_offset].first = node;
mem_block_visit_info[matched_mem_offset].second.clear();
mem_block_visit_info[matched_mem_offset].second.push_back(in_data_anchor->GetIdx());
} else { // the data was just visited by another consumer.
if (prev_node_input_index_vec.empty()) {
GELOGW("Missing prev node[%s] input index.", prev_node->GetName().c_str());
is_need_skip = true;
return;
}
if (prev_node == node) { // scenario: multiple anchors of the same node access the same data
vector<int64_t> prev_next_distances;
GE_IF_BOOL_EXEC(prev_node->GetOpDesc() == nullptr, is_need_skip = true; return);
auto input_desc = prev_node->GetOpDesc()->GetInputDesc(prev_node_input_index_vec[0]);
if (!ge::AttrUtils::GetListInt(input_desc, ATTR_NAME_DATA_VISIT_DISTANCE, prev_next_distances)) {
GELOGW("Get ATTR_NAME_DATA_VISIT_DISTANCE failed.");
is_need_skip = true;
return;
}
if (prev_next_distances.size() != kPrevNextDistanceNum) {
GELOGW("Size of prev_next_distance is not %d.", kPrevNextDistanceNum);
is_need_skip = true;
return;
} else {
distance = prev_next_distances[0]; // use the same prev_distance as previous anchor
}
mem_block_visit_info[matched_mem_offset].second.push_back(in_data_anchor->GetIdx());
} else {
distance = node_index_in_stream.at(node->GetName()) - node_index_in_stream.at(prev_node->GetName()) - 1;
UpdatePrevNodeInputDesc(prev_node, prev_node_input_index_vec, distance);
mem_block_visit_info[matched_mem_offset].first = node;
mem_block_visit_info[matched_mem_offset].second.clear();
mem_block_visit_info[matched_mem_offset].second.push_back(in_data_anchor->GetIdx());
}
}
UpdateCurNodeInputDesc(node, in_data_anchor->GetIdx(), distance);
}

void GraphMemoryAssigner::DeleteVisitInfoWhenLifecycleEnded(
const NodePtr &node,
const InDataAnchorPtr &in_data_anchor,
size_t matched_mem_offset,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info) {
GE_IF_BOOL_EXEC(node->GetOpDesc() == nullptr, return);
auto input_desc = node->GetOpDesc()->GetInputDesc(in_data_anchor->GetIdx());
bool is_end_of_inputmem_lifecycle = false;
// If is_end_of_inputmem_lifecycle is true, the current node is the last consumer of this data, so the block's
// visit info must be deleted in case the memory block is reused and visited again.
if (ge::AttrUtils::GetBool(input_desc, ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, is_end_of_inputmem_lifecycle) &&
is_end_of_inputmem_lifecycle) {
GELOGD("ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE is true, node name is [%s], in_data_anchor index is [%d]",
node->GetName().c_str(),
in_data_anchor->GetIdx());
auto iter = mem_block_visit_info.find(matched_mem_offset);
if (iter != mem_block_visit_info.end()) {
mem_block_visit_info.erase(iter);
}
}
}


void GraphMemoryAssigner::MarkNodeDistanceAttr(const ComputeGraphPtr &compute_graph,
NodePtr &node,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
const map<string, int64_t> &node_index_in_stream) {
GELOGD("Begin to mark node distance attr, node name is [%s]", node->GetName().c_str());
GE_IF_BOOL_EXEC(node == nullptr, return);
for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
auto peer_out_node = peer_out_anchor->GetOwnerNode();
GE_IF_BOOL_EXEC(peer_out_node == nullptr, continue);

GE_IF_BOOL_EXEC(peer_out_node->GetOpDesc() == nullptr, continue);
auto matched_mem_offset = peer_out_node->GetOpDesc()->GetOutputOffset().at(peer_out_anchor->GetIdx());

bool is_need_calc_distance = false;
CheckNeedCalcDistAndUpdateVisitInfo(peer_out_node, peer_out_anchor, matched_mem_offset,
mem_block_visit_info, is_need_calc_distance);
if (!is_need_calc_distance) {
continue;
}

bool is_need_skip = false;
CalcDistanceAndUpdateDesc(node_index_in_stream, in_data_anchor, matched_mem_offset, node,
mem_block_visit_info, is_need_skip);
if (is_need_skip) {
continue;
}

DeleteVisitInfoWhenLifecycleEnded(node, in_data_anchor, matched_mem_offset, mem_block_visit_info);
}
}

void GraphMemoryAssigner::MarkDistanceAttr() {
// key: mem_offset of the visited memory; value: the last visiting node and the input indices of that node
map<size_t, pair<NodePtr, vector<int64_t>>> mem_block_visit_info;
// key: node name; value: topological order of the node within its stream (ge_local ops excluded)
map<string, int64_t> node_index_in_stream;
// key: stream id; value: current number of nodes in that stream
map<int64_t, int64_t> stream_nodes_num;

for (auto &node : compute_graph_->GetAllNodes()) {
auto node_op_desc = node->GetOpDesc();
GE_IF_BOOL_EXEC(node_op_desc == nullptr, return);
int64_t stream_id = node_op_desc->GetStreamId();
if (node_op_desc->GetOpKernelLibName() != kEngineNameGeLocal) {
if (stream_nodes_num.find(stream_id) == stream_nodes_num.end()) {
stream_nodes_num.insert(std::make_pair(stream_id, 1));
} else {
++stream_nodes_num[stream_id];
}
node_index_in_stream.insert(std::make_pair(node->GetName(), stream_nodes_num[stream_id] - 1));

MarkNodeDistanceAttr(compute_graph_, node, mem_block_visit_info, node_index_in_stream);
} else {
GELOGD("node[%s] is ge_local_op, no need to calculate distance.", node->GetName().c_str());
}
}
}
} // namespace ge
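
MarkDistanceAttr and its helpers above record a two-element list {prev_distance, next_distance} on each input desc under ATTR_NAME_DATA_VISIT_DISTANCE: the current consumer stores its own prev distance with next set to -1, and the previous consumer's next distance is back-filled once a later consumer on the same stream touches the same memory block. The sketch below mirrors that bookkeeping with plain containers; the node names, the map, and the distance values are illustrative only.

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  // Stand-in for the per-input attribute storage keyed by "node:input".
  std::map<std::string, std::vector<int64_t>> visit_distance;

  // First consumer of a block: prev distance is known, next distance is not yet (-1),
  // mirroring UpdateCurNodeInputDesc's {distance, -1} initialisation.
  visit_distance["consumerA:in0"] = {3, -1};

  // When a later consumer on the same stream touches the same block, the previous
  // consumer's next distance is back-filled (as in UpdatePrevNodeInputDesc) and the
  // new consumer records its own prev distance.
  const int64_t gap_between_consumers = 5;
  visit_distance["consumerA:in0"][1] = gap_between_consumers;
  visit_distance["consumerB:in0"] = {gap_between_consumers, -1};

  for (const auto &it : visit_distance) {
    std::printf("%s prev:%lld next:%lld\n", it.first.c_str(),
                static_cast<long long>(it.second[0]), static_cast<long long>(it.second[1]));
  }
  return 0;
}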

+ 35
- 2
ge/graph/build/memory/graph_mem_assigner.h View File

@@ -103,9 +103,9 @@ class GraphMemoryAssigner {

ge::Status AssignMemory2HasRefAttrNode();

ge::Status ReAssignMemory(bool is_loop_graph, map<int64_t, size_t> &mem_type_to_offset);
ge::Status ReAssignMemory(bool is_loop_graph, map<uint64_t, size_t> &mem_type_to_offset);

ge::Status AssignZeroCopyMemory(map<int64_t, size_t> &mem_offset, size_t &zero_mem_copy_size);
ge::Status AssignZeroCopyMemory(map<uint64_t, size_t> &mem_offset, size_t &zero_mem_copy_size);

ge::Status SetInputOffset();

@@ -118,6 +118,13 @@ class GraphMemoryAssigner {

ge::Status AssignReferenceMemory();

void MarkDistanceAttr();

void MarkNodeDistanceAttr(const ComputeGraphPtr &compute_graph,
NodePtr &node,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
const map<string, int64_t> &node_index_in_stream);

private:
///
/// @ingroup ge_graph
@@ -197,6 +204,32 @@ class GraphMemoryAssigner {

Status UpdateRefOpOffsetReverse(const NodePtr &node);

bool IsOutputVisitedByMultiStream(const NodePtr &peer_out_node, int64_t out_anchor_index);

void UpdatePrevNodeInputDesc(const NodePtr &prev_node,
const vector<int64_t> &prev_node_input_index_vec,
int64_t distance);

void UpdateCurNodeInputDesc(const NodePtr &cur_node, int64_t cur_node_input_index, int64_t distance);

void CheckNeedCalcDistAndUpdateVisitInfo(const NodePtr &peer_out_node,
const OutDataAnchorPtr &peer_out_anchor,
size_t matched_mem_offset,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
bool &is_need_calc_distance);

void CalcDistanceAndUpdateDesc(const map<string, int64_t> &node_index_in_stream,
const InDataAnchorPtr &in_data_anchor,
size_t matched_mem_offset,
NodePtr &node,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info,
bool &is_need_skip);

void DeleteVisitInfoWhenLifecycleEnded(const NodePtr &node,
const InDataAnchorPtr &in_data_anchor,
size_t matched_mem_offset,
map<size_t, pair<NodePtr, vector<int64_t>>> &mem_block_visit_info);

MemoryOffsetMap memory_offset_;
ge::ComputeGraphPtr compute_graph_;
HybridMemAssignerPtr mem_assigner_;


+ 10
- 8
ge/graph/build/memory/hybrid_mem_assigner.cc View File

@@ -23,27 +23,30 @@

namespace ge {
HybridMemAssigner::HybridMemAssigner(ge::ComputeGraphPtr compute_graph)
: mem_offset_(0), p2p_mem_offset_(0), compute_graph_(std::move(compute_graph)), priority_assigner_(nullptr) {}
: compute_graph_(std::move(compute_graph)), priority_assigner_(nullptr) {}

Status HybridMemAssigner::AssignMemory(std::unique_ptr<BlockMemAssigner> &block_assigner, size_t &mem_size) {
vector<int64_t> ranges;
GE_CHECK_NOTNULL(block_assigner);
if (block_assigner->GetMemoryRanges(ranges) != SUCCESS) {
GELOGE(FAILED, "GetMemoryRanges Fail!");
GELOGE(FAILED, "[Get][MemoryRanges] Fail!");
return FAILED;
}
GE_IF_BOOL_EXEC(ranges.empty(), return SUCCESS);

block_assigner->AssignMemoryWithReuse(ranges);

mem_size = block_assigner->GetMemOffset();
// total size is the sum over all memory types
for (auto it : block_assigner->GetMemOffsets()) {
mem_size += it.second;
}
return SUCCESS;
}

Status HybridMemAssigner::Assign() {
if (GraphUtils::GetRefMapping(compute_graph_, symbol_to_anchors_, anchor_to_symbol_) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get ref-mapping for graph %s failed", compute_graph_->GetName().c_str());
GELOGE(FAILED, "Get ref-mapping for graph %s failed.", compute_graph_->GetName().c_str());
GELOGE(FAILED, "[Get][RefMapping] for graph %s failed.", compute_graph_->GetName().c_str());
return FAILED;
}

@@ -58,8 +61,8 @@ Status HybridMemAssigner::Assign() {
size_t bin_mem_size = 0;
size_t max_mem_size = 0;

GE_CHK_STATUS_RET(AssignMemory(binary_assigner, bin_mem_size), "BinaryBlock Method AssignMemory Fail!");
GE_CHK_STATUS_RET(AssignMemory(max_assigner, max_mem_size), "MaxBlock Method AssignMemory Fail!");
GE_CHK_STATUS_RET(AssignMemory(binary_assigner, bin_mem_size), "[Assign][Memory] Fail!");
GE_CHK_STATUS_RET(AssignMemory(max_assigner, max_mem_size), "[Assign][Memory] Fail!");

std::unique_ptr<BlockMemAssigner> priority_assigner;

@@ -73,8 +76,7 @@ Status HybridMemAssigner::Assign() {
}

priority_assigner->SetOpMemOffset(false);
mem_offset_ = priority_assigner->GetMemOffset();
p2p_mem_offset_ = priority_assigner->GetP2PMemOffset();
mem_offsets_ = priority_assigner->GetMemOffsets();
priority_assigner_ = std::move(priority_assigner);

return SUCCESS;


+ 2
- 4
ge/graph/build/memory/hybrid_mem_assigner.h View File

@@ -42,16 +42,14 @@ class HybridMemAssigner : public MemAssigner {

Status Assign() override;

size_t GetMemOffset() const { return mem_offset_; }
size_t GetP2PMemOffset() const { return p2p_mem_offset_; }
const std::map<uint64_t, size_t> &GetMemOffsets() const { return mem_offsets_; }

BlockMemAssignerPtr GetPriorityAssinger() const { return priority_assigner_; }

private:
Status AssignMemory(std::unique_ptr<BlockMemAssigner> &block_assigner, size_t &mem_size);

size_t mem_offset_;
size_t p2p_mem_offset_;
std::map<uint64_t, size_t> mem_offsets_;

ge::ComputeGraphPtr compute_graph_;



+ 12
- 9
ge/graph/build/memory/memory_assigner.cc View File

@@ -20,51 +20,54 @@
#include "graph/build/memory/graph_mem_assigner.h"

namespace ge {
Status MemoryAssigner::AssignMemory(bool is_loop_graph, map<int64_t, size_t> &mem_offset, size_t &zero_copy_mem_size) {
Status MemoryAssigner::AssignMemory(bool is_loop_graph, map<uint64_t, size_t> &mem_offset, size_t &zero_copy_mem_size) {
GraphMemoryAssigner graph_mem_assigner(compute_graph_);

if (graph_mem_assigner.AssignMemory() != ge::SUCCESS) {
GELOGE(ge::FAILED, "Memory assigner failed");
GELOGE(ge::FAILED, "[Assign][Memory] failed, graph:%s", compute_graph_->GetName().c_str());
return ge::FAILED;
}

// Reassign memory for special nodes
if (graph_mem_assigner.ReAssignMemory(is_loop_graph, mem_offset) != ge::SUCCESS) {
GELOGE(ge::FAILED, "Memory assigner failed");
GELOGE(ge::FAILED, "[ReAssign][Memory] failed, graph:%s", compute_graph_->GetName().c_str());
return ge::FAILED;
}

// Assign memory (block and offset) for zero copy nodes
if (graph_mem_assigner.AssignZeroCopyMemory(mem_offset, zero_copy_mem_size) != ge::SUCCESS) {
GELOGE(ge::FAILED, "Zero copy memory assigner failed");
GELOGE(ge::FAILED, "[Assign][ZeroCopyMemory] failed, graph:%s", compute_graph_->GetName().c_str());
return ge::FAILED;
}

if (graph_mem_assigner.AssignMemory2HasRefAttrNode() != ge::SUCCESS) {
GELOGE(ge::FAILED, "Assign memory to node which has ref attr failed!");
GELOGE(ge::FAILED, "[Assign][Memory] to node which has ref attr failed! graph:%s",
compute_graph_->GetName().c_str());
return ge::FAILED;
}

// Assign memory for reference
if (graph_mem_assigner.AssignReferenceMemory() != ge::SUCCESS) {
GELOGE(ge::FAILED, "Assign reference memory failed!");
GELOGE(ge::FAILED, "[Assign][ReferenceMemory] failed! graph:%s", compute_graph_->GetName().c_str());
return ge::FAILED;
}

// Variable attr assignment must be done after all memory has been assigned
if (graph_mem_assigner.AssignVarAttr2Nodes() != SUCCESS) {
GELOGE(FAILED, "Variable Memory assigner failed");
GELOGE(FAILED, "[Variable][Memory] assigner failed, graph:%s", compute_graph_->GetName().c_str());
return FAILED;
}
if (graph_mem_assigner.SetInputOffset() != ge::SUCCESS) {
GELOGE(ge::FAILED, "SetInputOffset Fail!");
GELOGE(ge::FAILED, "[Set][InputOffset] Fail! graph:%s", compute_graph_->GetName().c_str());
return ge::FAILED;
}

if (graph_mem_assigner.CheckOffset() != SUCCESS) {
GELOGE(FAILED, "CheckOffset Fail!");
GELOGE(FAILED, "[Check][Offset] Fail! graph:%s", compute_graph_->GetName().c_str());
return FAILED;
}

graph_mem_assigner.MarkDistanceAttr();
return SUCCESS;
}
} // namespace ge

+ 21
- 19
ge/graph/build/memory/var_mem_assign_util.cc View File

@@ -53,9 +53,8 @@ Status VarMemAssignUtil::AssignStaticMemory2Node(ge::ComputeGraphPtr &compute_gr
GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(n->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
string node_name = n->GetName();
GE_IF_BOOL_EXEC(n->GetOpDesc()->GetAllOutputsDesc().empty(),
REPORT_INNER_ERROR("E19999", "check node:%s has no OutputDesc",
n->GetName().c_str());
GELOGE(FAILED, "node:%s has no OutputDesc.", n->GetName().c_str());
REPORT_INNER_ERROR("E19999", "check node:%s has no OutputDesc", n->GetName().c_str());
GELOGE(FAILED, "[Check][Param] node:%s has no OutputDesc.", n->GetName().c_str());
return FAILED);
ge::ConstGeTensorDescPtr tensor_desc = n->GetOpDesc()->GetOutputDescPtr(0);
GE_CHECK_NOTNULL(tensor_desc);
@@ -118,9 +117,8 @@ Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::N
GE_CHECK_NOTNULL(node->GetOpDesc());
output_list = node->GetOpDesc()->GetOutputOffset();
if (output_list.empty()) {
REPORT_INNER_ERROR("E19999", "check node:%s output_offset_list is empty",
node->GetName().c_str());
GELOGE(PARAM_INVALID, "Output_list is empty");
REPORT_INNER_ERROR("E19999", "check node:%s output_offset_list is empty", node->GetName().c_str());
GELOGE(PARAM_INVALID, "[Check][Param] node:%s Output_list is empty", node->GetName().c_str());
return PARAM_INVALID;
}
GE_CHECK_NOTNULL(var_node->GetOpDesc());
@@ -133,7 +131,8 @@ Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::N
if (index >= out_list_size) {
REPORT_INNER_ERROR("E19999", "param index:%d >= output_list.size() %d in node %s, check invalid",
index, out_list_size, node->GetName().c_str());
GELOGE(FAILED, "index %d >= output_list.size() %d", index, out_list_size);
GELOGE(FAILED, "[Check][Param] index %d >= output_list.size() %d in node %s", index, out_list_size,
node->GetName().c_str());
return FAILED;
}

@@ -169,7 +168,8 @@ Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr
broad_cast_info.broadcast_name = node->GetName();

auto op_desc = node->GetOpDesc();
GE_CHK_BOOL_RET_STATUS(op_desc != nullptr, FAILED, "Get broadcast op %s desc is nullptr", node->GetName().c_str());
GE_CHK_BOOL_RET_STATUS(op_desc != nullptr, FAILED,
"[Check][Param] Get broadcast op %s desc is nullptr", node->GetName().c_str());

GE_IF_BOOL_EXEC(broad_cast_info.idx < 0,
GELOGI("Broadcast input index must be positive, actual %d", broad_cast_info.idx);
@@ -180,8 +180,8 @@ Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr
if (input_tensor_desc_ptr_vistor.size() <= broad_cast_index) {
REPORT_INNER_ERROR("E19999", "Get broadcast op %s input tensor desc size [%zu] < idx [%d]",
node->GetName().c_str(), input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
GELOGE(FAILED, "Get broadcast op %s input tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
GELOGE(FAILED, "[Check][Param] Get broadcast op %s input tensor desc size [%zu] < idx [%d]",
node->GetName().c_str(), input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
return FAILED;
}
const ge::GeTensorDescPtr input_tensor_desc =
@@ -192,8 +192,8 @@ Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr

vector<int64_t> output_list = op_desc->GetOutputOffset();
GE_CHK_BOOL_RET_STATUS(output_list.size() > broad_cast_index, FAILED,
"Get broadcast op %s output_list size [%zu] < idx [%d]", node->GetName().c_str(),
output_list.size(), broad_cast_info.idx);
"[Check][Param] Get broadcast op %s output_list size [%zu] < idx [%d]",
node->GetName().c_str(), output_list.size(), broad_cast_info.idx);
broad_cast_info.input_offset = output_list[broad_cast_info.idx];
broad_cast_info.output_offset = output_list[broad_cast_info.idx];

@@ -201,16 +201,16 @@ Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr

auto output_tensor_desc_ptr_vistor = op_desc->GetAllOutputsDescPtr();
GE_CHK_BOOL_RET_STATUS(output_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
"Get broadcast op %s output tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
output_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
"[Check][Param] Get broadcast op %s output tensor desc size [%zu] < idx [%d]",
node->GetName().c_str(), output_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
const ge::GeTensorDescPtr output_tensor_desc =
output_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
int64_t output_size = 0;
GE_CHK_STATUS(TensorUtils::GetSize(*output_tensor_desc, output_size), "get input size failed.");
GE_CHK_STATUS(TensorUtils::GetSize(*output_tensor_desc, output_size), "[Check][Param] get output size failed.");
broad_cast_info.output_size = output_size;
GE_CHK_BOOL_RET_STATUS(broad_cast_info.output_size == broad_cast_info.input_size, FAILED,
"Broadcast op input size[%lu] is not equal output size[%lu]", broad_cast_info.input_size,
broad_cast_info.output_size);
"[Check][Param] Broadcast op input size[%lu] is not equal output size[%lu]",
broad_cast_info.input_size, broad_cast_info.output_size);

GE_CHK_STATUS_RET(VarManager::Instance(session_id)->SaveBroadCastInfo(graph_id, broad_cast_info));
return SUCCESS;
@@ -298,7 +298,9 @@ Status VarMemAssignUtil::SetOutTransNodeToAssign(const ge::NodePtr &node, const
vector<int64_t> output_list = node->GetOpDesc()->GetOutputOffset();
auto out_list_size = output_list.size();
GE_CHECK_SIZE(out_list_size);
GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %zu >= output_list.size() %zu", index, out_list_size);
GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED,
"[Check][Param] index %zu >= output_list.size() %zu, node:%s",
index, out_list_size, node->GetName().c_str());

// final_trans_node outputOffset[0] to assign_node outputOffset[0]
GELOGI("final_trans_node outputOffset[0] is: %ld", final_trans_output_list[0]);
@@ -372,7 +374,7 @@ Status VarMemAssignUtil::AssignData2VarRef(const ge::NodePtr &has_ref_attr_node,
GE_CHECK_SIZE(ref_attr_node_output_list.size());

GE_CHK_BOOL_RET_STATUS(out_index < ref_attr_node_output_list.size(), FAILED,
"out_index %u >= ref_attr_node_output_list.size() %zu", out_index,
"[Check][Param] out_index %u >= ref_attr_node_output_list.size() %zu", out_index,
ref_attr_node_output_list.size());

ref_attr_node_output_list[out_index] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(dev_ptr));


+ 107
- 81
ge/graph/build/model_builder.cc View File

@@ -47,6 +47,7 @@
#include "omg/version.h"
#include "register/op_registry.h"
#include "graph/passes/set_input_output_offset_pass.h"
#include "graph/build/memory/block_mem_assigner.h"

using std::map;
using std::set;
@@ -118,14 +119,16 @@ Status ModelBuilder::CalcOutputSize(const ge::NodePtr &n) {
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:%u",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
GELOGE(graph_status, "GetTensorMemorySizeInBytes failed!");
GELOGE(graph_status, "[Get][TensorMemorySize] In Bytes failed for op:%s(%s) index:%u",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
return FAILED;
}
TensorUtils::SetSize(desc_temp, size_temp);
if (node_op_desc->UpdateOutputDesc(index, desc_temp) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update Output desc size failed for op:%s(%s) index:%u",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
GELOGE(FAILED, "UpdateOutputDesc failed.");
GELOGE(FAILED, "[Update][OutputDesc] failed for op:%s(%s) index:%u",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
return FAILED;
}

@@ -212,14 +215,14 @@ Status ModelBuilder::AdjustConstWeightSize(const ge::NodePtr &node, size_t &mem_
if (weights.empty()) {
REPORT_INNER_ERROR("E19999", "Check weights size of node %s(%s) is empty",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
GELOGE(FAILED, "[Check][Param] weights size of node %s is empty", node->GetName().c_str());
return FAILED;
}
GeTensorPtr weight = weights[0];
if (weight == nullptr) {
REPORT_INNER_ERROR("E19999", "Check weight of node %s(%s) is nullptr",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights[0] is null.");
GELOGE(FAILED, "[Check][Param] weights[0] is null, node:%s.", node->GetName().c_str());
return FAILED;
}
GeTensorDesc &tensor_desc = weight->MutableTensorDesc();
@@ -271,15 +274,16 @@ Status ModelBuilder::SetInputOutputDesc() {
bool is_unknow = false;
(void)NodeUtils::GetNodeUnknownShapeStatus(*n, is_unknow);
if ((IsGeLocalOp(n->GetOpDesc())) && (!is_unknow)) {
GE_CHK_STATUS_RET(CalcOutputSize(n), "Calculate output size failed");
GE_CHK_STATUS_RET(CalcOutputSize(n), "[Calc][OutputSize] failed, node:%s", n->GetName().c_str());
}
ret = AdjustConstWeightSize(n, weight_offset_);
GE_CHK_STATUS_RET(ret, "AdjustConstWeightSize failed");
GE_CHK_STATUS_RET(ret, "[Adjust][ConstWeightSize] failed, node:%s", n->GetName().c_str());

GE_IF_BOOL_EXEC(((weight_offset_ > 0) && (weight_offset_ % MEM_ALIGN_SIZE != 0)),
weight_offset_ = (weight_offset_ + MEM_ALIGN_SIZE - 1) / MEM_ALIGN_SIZE * MEM_ALIGN_SIZE);
}
GE_CHK_STATUS_RET(compute_graph_->TopologicalSorting(), "TopologicalSorting failed");
GE_CHK_STATUS_RET(compute_graph_->TopologicalSorting(), "[Call][TopologicalSorting] failed, graph:%s",
compute_graph_->GetName().c_str());
return SUCCESS;
}

@@ -363,7 +367,8 @@ Status ModelBuilder::AdjustInputTensorFlag() {
REPORT_CALL_ERROR("E19999", "Update Input desc size failed for op:%s(%s) index:%u",
owner_node_op_desc->GetName().c_str(), owner_node_op_desc->GetType().c_str(),
in_anchors->GetIdx());
GELOGE(FAILED, "UpdateOutputDesc failed.");
GELOGE(FAILED, "[Update][InputDesc] failed for op:%s(%s) index:%u",
owner_node_op_desc->GetName().c_str(), owner_node_op_desc->GetType().c_str(), in_anchors->GetIdx());
return FAILED;
}
}
@@ -391,61 +396,64 @@ Status ModelBuilder::BuildModelDef(ge::Model &model) {

max_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_HBM];
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, max_mem_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_MEMORY_SIZE.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_MEMORY_SIZE failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_MEMORY_SIZE.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_MEMORY_SIZE.c_str());
return FAILED);
auto mem_type_session_scope = (kSessionScopeMemory | RT_MEMORY_HBM);
size_t session_scope_mem_offset = 0;
auto it = mem_type_to_mem_offset_.find(mem_type_session_scope);
if (it != mem_type_to_mem_offset_.end()) {
session_scope_mem_offset = it->second;
}
if (mem_type_to_mem_offset_.find(RT_MEMORY_P2P_DDR) != mem_type_to_mem_offset_.end()) {
p2p_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_P2P_DDR];
}
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_mem_offset_),
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_SESSION_SCOPE_MEMORY_SIZE, session_scope_mem_offset),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_P2P_MEMORY_SIZE failed.");
return FAILED);
ATTR_MODEL_SESSION_SCOPE_MEMORY_SIZE.c_str());
GELOGE(FAILED, "SetInt of ATTR_NAME_SESSION_SCOPE_MEMORY_SIZE failed.");
return FAILED);

GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_mem_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, weight_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_WEIGHT_SIZE.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_WEIGHT_SIZE failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_WEIGHT_SIZE.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_WEIGHT_SIZE.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_STREAM_NUM.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_STREAM_NUM failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_STREAM_NUM.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_STREAM_NUM.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, event_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_EVENT_NUM.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_EVENT_NUM failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_EVENT_NUM.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_EVENT_NUM.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(&model, ATTR_MODEL_HUGE_STREAM_LIST, huge_streams_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_HUGE_STREAM_LIST.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_HUGE_STREAM_LIST failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_HUGE_STREAM_LIST.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_HUGE_STREAM_LIST.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_LABEL_NUM, label_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_LABEL_NUM.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_LABEL_NUM failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_LABEL_NUM.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_LABEL_NUM.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_ZERO_COPY_MEMORY_SIZE, zero_copy_mem_size_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_ZERO_COPY_MEMORY_SIZE.c_str());
GELOGE(FAILED, "SetInt of ATTR_MODEL_ZERO_COPY_MEMORY_SIZE failed.");
GELOGE(FAILED, "[Set][Attr] %s in model failed.", ATTR_MODEL_ZERO_COPY_MEMORY_SIZE.c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, ATTR_MODEL_OUT_NODES_NAME, GetLocalOmgContext().net_out_nodes),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_OUT_NODES_NAME.c_str());
GELOGE(FAILED, "SetListStr of ATTR_MODEL_OUT_NODES_NAME failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_OUT_NODES_NAME.c_str());
GELOGE(FAILED, "[Set][Str] %s in model failed.", ATTR_MODEL_OUT_NODES_NAME.c_str());
return FAILED);
GELOGI("For model, max_mem_offset_: %zu, p2p_mem_size: %zu, zero_copy_mem_size_: %zu", max_mem_offset_,
p2p_mem_offset_, zero_copy_mem_size_);
GELOGI("For model, max_mem_offset: %zu, p2p_mem_size: %zu, zero_copy_mem_size: %zu, session_scope_mem_size: %zu",
max_mem_offset_, p2p_mem_offset_, zero_copy_mem_size_, session_scope_mem_offset);
string fp_ceiling_mode;
if (ge::GetContext().GetOption("ge.fpCeilingMode", fp_ceiling_mode) == SUCCESS) {
if (!ge::AttrUtils::SetStr(&model, ATTR_FP_CEILING_MODE, fp_ceiling_mode)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_FP_CEILING_MODE.c_str());
GELOGE(FAILED, "Failed to set attr ATTR_FP_CEILING_MODE");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_FP_CEILING_MODE.c_str());
GELOGE(FAILED, "[Set][Str] %s in model failed", ATTR_FP_CEILING_MODE.c_str());
return FAILED;
}
GELOGI("Set attr ATTR_FP_CEILING_MODE to model, value is %s.", fp_ceiling_mode.c_str());
@@ -459,31 +467,27 @@ Status ModelBuilder::BuildModelDef(ge::Model &model) {
int64_t core_type = (ge_core_type == kVectorCore) ? 1 : 0;
GELOGI("core_type: %ld", core_type);
if (!ge::AttrUtils::SetInt(&model, ATTR_MODEL_CORE_TYPE, core_type)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_MODEL_CORE_TYPE.c_str());
GELOGE(FAILED, "SetInt of ATTR_CORE_TYPE failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_MODEL_CORE_TYPE.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_MODEL_CORE_TYPE.c_str());
}
InitL1FusionOption();
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(&model, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_NAME_SWITCH_FOR_L1_FUSION.c_str());
GELOGE(FAILED, "SetBool of ATTR_NAME_SWITCH_FOR_L1_FUSION failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_NAME_SWITCH_FOR_L1_FUSION.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed.", ATTR_NAME_SWITCH_FOR_L1_FUSION.c_str());
return FAILED);
const DumpProperties &dump_properties = DumpManager::GetInstance().GetDumpProperties(session_id_);
bool is_op_debug = dump_properties.IsOpDebugOpen();
if (is_op_debug) {
if (!ge::AttrUtils::SetBool(&model, ATTR_OP_DEBUG_FLAG, is_op_debug)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_OP_DEBUG_FLAG.c_str());
GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_FLAG failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_OP_DEBUG_FLAG.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_OP_DEBUG_FLAG.c_str());
return FAILED;
}
uint32_t op_debug_mode = dump_properties.GetOpDebugMode();
GELOGI("Get op debug mode:%d", op_debug_mode);
if (!ge::AttrUtils::SetInt(&model, ATTR_OP_DEBUG_MODE, op_debug_mode)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed",
ATTR_OP_DEBUG_MODE.c_str());
GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_MODE failed.");
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed", ATTR_OP_DEBUG_MODE.c_str());
GELOGE(FAILED, "[Set][Attr] %s in model failed", ATTR_OP_DEBUG_MODE.c_str());
return FAILED;
}
}
@@ -556,7 +560,7 @@ Status ModelBuilder::MergeWeights() {
if (weight == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get const weight in op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Can't get const op weight, name: %s", node->GetName().c_str());
GELOGE(FAILED, "[Call][MutableTensor] Can't get const op weight, name:%s", node->GetName().c_str());
return FAILED;
}

@@ -581,14 +585,15 @@ Status ModelBuilder::MergeWeights() {
GE_IF_BOOL_EXEC(base_addr == nullptr,
REPORT_INNER_ERROR("E19999", "Check weight in op:%s(%s) is nullptr",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Base addr is nullptr.");
GELOGE(FAILED, "[Check][Param] weight in op:%s(%s) is nullptr",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
if (weight_offset_ - offset < weight_data.size()) {
REPORT_INNER_ERROR("E19999", "left weight size not enough for op:%s(%s) left_size:%zu, weight_size:%zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
weight_offset_ - offset, weight_data.size());
GELOGE(FAILED, "left weight size not enough. left_size:%lu, weight_size:%lu",
weight_offset_ - offset, weight_data.size());
GELOGE(FAILED, "[Check][Param] left weight size not enough for op:%s(%s). left_size:%lu, weight_size:%lu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), weight_offset_ - offset, weight_data.size());
return FAILED;
}
uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(base_addr) + offset;
@@ -615,7 +620,7 @@ Status ModelBuilder::MergeWeights() {
REPORT_CALL_ERROR("E19999", "mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu,",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
GELOGE(FAILED, "mem copy failed. errret:%u, "
GELOGE(FAILED, "[Update][Data] mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
return FAILED;
@@ -647,6 +652,13 @@ Status ModelBuilder::SaveAtomicTBEKernel(const OpDescPtr &op_desc) {
std::vector<char> data(kernel_buffer.GetData(), kernel_buffer.GetData() + kernel_buffer.GetSize());
tbe_kernel = MakeShared<OpKernelBin>(kernel_name, std::move(data));
GE_CHECK_NOTNULL(tbe_kernel);
GELOGI("Node [%s][%s] start recovery extra attr %s from %s", atomic_op_desc->GetName().c_str(),
atomic_op_desc->GetType().c_str(), ge::OP_EXTATTR_NAME_TBE_KERNEL, ATTR_NAME_TBE_KERNEL_NAME.c_str());
if (!(atomic_op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel))) {
std::string error = "Node" + FmtToStr(atomic_op_desc->GetName()) + "set extra tbeKernel attr failed";
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
}
}
if (tbe_kernel == nullptr) {
@@ -695,13 +707,22 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
GE_CHECK_NOTNULL(kernel_buffer.GetData());
std::vector<char> data(kernel_buffer.GetData(), kernel_buffer.GetData() + kernel_buffer.GetSize());
tbe_kernel = std::make_shared<OpKernelBin>(kernel_name, std::move(data));
GE_CHECK_NOTNULL(tbe_kernel);
GELOGI("Node [%s][%s] start recovery extra attr %s from %s", node_op_desc->GetName().c_str(),
node_op_desc->GetType().c_str(), ge::OP_EXTATTR_NAME_TBE_KERNEL, ATTR_NAME_TBE_KERNEL_NAME.c_str());
if (!(node_op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel))) {
std::string error = "Node" + FmtToStr(node_op_desc->GetName()) + "set extra tbeKernel attr failed";
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
}
}
GE_IF_BOOL_EXEC(tbe_kernel == nullptr, continue);
if (tbe_name_set.count(tbe_kernel->GetName()) > 0) {
REPORT_INNER_ERROR("E19999", "tbe_kernel name %s can't be the same, judge for op:%s(%s),",
REPORT_INNER_ERROR("E19999", "tbe_kernel name %s can't be the same, judge for op:%s(%s)",
tbe_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str());
GELOGE(FAILED, "tbe_kernel name %s can't be the same", tbe_kernel->GetName().c_str());
GELOGE(FAILED, "[Check][Param] tbe_kernel name %s can't be the same, judge for op:%s(%s)",
tbe_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str());
return FAILED;
}
tbe_name_set.insert(tbe_kernel->GetName());
@@ -719,9 +740,10 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_CUSTAICPU_KERNEL, CustAICPUKernelPtr());
GE_IF_BOOL_EXEC(cust_aicpu_kernel == nullptr, continue);
if (aicpu_name_set.count(cust_aicpu_kernel->GetName()) > 0) {
REPORT_INNER_ERROR("E19999", "aicpu_kernel name %s can't be the same, judge for op:%s(%s),",
REPORT_INNER_ERROR("E19999", "aicpu_kernel name %s can't be the same, judge for op:%s(%s)",
cust_aicpu_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str());
GELOGE(FAILED, "aicpu_kernel name %s can't be the same", cust_aicpu_kernel->GetName().c_str());
GELOGE(FAILED, "[Check][Param] aicpu_kernel name %s can't be the same, judge for op:%s(%s)",
cust_aicpu_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str());
return FAILED;
}
aicpu_name_set.insert(cust_aicpu_kernel->GetName());
@@ -730,11 +752,11 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
}

if (!tbe_kernel_store_.Build()) {
GELOGE(FAILED, "TBE Kernels store build failed!");
GELOGE(FAILED, "[Call][Build] TBE Kernels store build failed!");
return FAILED;
}
if (!cust_aicpu_kernel_store_.Build()) {
GELOGE(FAILED, "custom AICPU kernels store build failed!");
GELOGE(FAILED, "[Call][Build] custom AICPU kernels store build failed!");
return FAILED;
}
ge_model.SetTBEKernelStore(tbe_kernel_store_);
@@ -744,14 +766,14 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
GeAttrValue::BYTES task_def_bytes;
if (!AttrUtils::GetZeroCopyBytes(model, MODEL_ATTR_TASKS, task_def_bytes)) {
REPORT_CALL_ERROR("E19999", "Get attr:%s in model failed", MODEL_ATTR_TASKS.c_str());
GELOGE(INTERNAL_ERROR, "Get zero copy bytes fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in model failed", MODEL_ATTR_TASKS.c_str());
return INTERNAL_ERROR;
}
int byte_size = static_cast<int>(task_def_bytes.GetSize());
std::shared_ptr<domi::ModelTaskDef> task = ge::MakeShared<domi::ModelTaskDef>();
GE_CHECK_NOTNULL(task);
GE_CHK_BOOL_EXEC(ReadProtoFromArray(task_def_bytes.GetData(), byte_size, task.get()), return INTERNAL_ERROR,
"ReadProtoFromArray failed.");
"[Read][Proto] From Array failed.");
ge_model.SetModelTaskDef(task);

// Add graph
@@ -780,11 +802,12 @@ void ModelBuilder::SetModelVersion(ge::Model &model) {
Status ModelBuilder::PreBuildModel() {
if ((compute_graph_ == nullptr) || !(compute_graph_->IsValid())) {
REPORT_INNER_ERROR("E19999", "Check compute_graph no valid");
GELOGE(FAILED, "Graph_ is not valid.");
GELOGE(FAILED, "[Check][Param] Graph_ is not valid.");
return FAILED;
}

GE_CHK_STATUS_RET(SetInputOutputDesc(), "SetInputOutputDesc Failed!");
GE_CHK_STATUS_RET(SetInputOutputDesc(),
"[Set][InputOutputDesc] Failed! graph:%s", compute_graph_->GetName().c_str());

AddNodeInputProperty();

@@ -792,14 +815,15 @@ Status ModelBuilder::PreBuildModel() {
}

Status ModelBuilder::BuildModelForGetTask(ge::Model &model) {
GE_CHK_STATUS_RET(AdjustInputTensorFlag(), "AdjustInputTensorFlag failed!");
GE_CHK_STATUS_RET(AdjustInputTensorFlag(), "[Adjust][InputTensorFlag] failed! graph:%s",
compute_graph_->GetName().c_str());

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kStreamAlloc);
// Assign logical streams.
StreamAllocator stream_allocator(compute_graph_, subgraphs_);
GE_TIMESTAMP_START(AssignLogicalStreams);
GE_CHK_STATUS_RET(stream_allocator.AssignLogicalStreams(stream_max_parallel_num_, hcom_parallel_),
"Assign logical streams failed.");
"[Assign][LogicalStreams] failed. graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_END(AssignLogicalStreams, "GraphBuilder::AssignLogicalStreams");

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kMemoryAlloc);
@@ -810,34 +834,36 @@ Status ModelBuilder::BuildModelForGetTask(ge::Model &model) {
GE_TIMESTAMP_START(AssignMemory);
MemoryAssigner mem_assigner(compute_graph_);
GE_CHK_STATUS_RET(mem_assigner.AssignMemory(is_loop_graph_, mem_type_to_mem_offset_, zero_copy_mem_size_),
"Assign Memory Failed!");
"[Assign][Memory] Failed! graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_END(AssignMemory, "GraphBuilder::AssignMemory");

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
GE_TIMESTAMP_START(SetInputOutputOffset);
SetInputOutputOffsetPass input_output_offset;
GE_CHK_STATUS_RET(input_output_offset.Run(compute_graph_), "Set input output offset failed.");
GE_CHK_STATUS_RET(input_output_offset.Run(compute_graph_),
"[Set][InputOutputOffset] failed. graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_END(SetInputOutputOffset, "SetInputOutputOffsetPass::Run");

// Compile single op in graph build stage
GE_TIMESTAMP_START(CompileSingleOp);
GE_CHK_STATUS_RET(CompileSingleOp(), "ATC builder CompileSingleOp() return fail.");
GE_CHK_STATUS_RET(CompileSingleOp(), "[Compile][SingleOp] fail. graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_EVENT_END(CompileSingleOp, "GraphBuilder::CompileSingleOp");

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kStreamAlloc);
// Refresh real streams and insert event nodes.
GE_TIMESTAMP_START(RefreshRealStream);
GE_CHK_STATUS_RET(stream_allocator.RefreshRealStream(stream_num_, event_num_), "RefreshRealStream failed.");
GE_CHK_STATUS_RET(stream_allocator.RefreshRealStream(stream_num_, event_num_),
"[Refresh][RealStream] failed. graph:%s", compute_graph_->GetName().c_str());
huge_streams_ = stream_allocator.GetHugeStreams();
GE_TIMESTAMP_END(RefreshRealStream, "GraphBuilder::RefreshRealStream");

ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
GE_TIMESTAMP_START(MergeWeights);
GE_CHK_STATUS_RET(MergeWeights(), "MergeWeights Failed!");
GE_CHK_STATUS_RET(MergeWeights(), "[Merge][Weights] Failed! graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_END(MergeWeights, "GraphBuilder::MergeWeights");

GE_TIMESTAMP_START(BuildModelDef);
GE_CHK_STATUS_RET(BuildModelDef(model), "BuildModelDef failed!");
GE_CHK_STATUS_RET(BuildModelDef(model), "[Build][ModelDef] failed! graph:%s", compute_graph_->GetName().c_str());
GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");

SetModelVersion(model);
@@ -847,7 +873,7 @@ Status ModelBuilder::BuildModelForGetTask(ge::Model &model) {

Status ModelBuilder::BuildModelForGetDynShapeTask(ge::Model &model_def) {
GE_TIMESTAMP_START(BuildModelDef);
GE_CHK_STATUS_RET(BuildModelDef(model_def), "BuildModelDef failed!");
GE_CHK_STATUS_RET(BuildModelDef(model_def), "[Build][ModelDef] failed!");
GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");
SetModelVersion(model_def);
return SUCCESS;
@@ -860,7 +886,7 @@ Status ModelBuilder::CompileSingleOp() {
std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
if ((instance == nullptr) || !instance->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before");
GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "CompileSingleOp failed.");
GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "[Check][Param] CompileSingleOp failed.");
return ge::GE_CLI_GE_NOT_INITIALIZED;
}

@@ -883,7 +909,7 @@ Status ModelBuilder::CompileSingleOp() {
if (kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "Check kernel lib name empty of op:%s(%s)",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(ge::INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node->GetName().c_str(),
GELOGE(ge::INTERNAL_ERROR, "[Get][Name] of node:%s(%s) kernel lib failed.", node->GetName().c_str(),
node->GetType().c_str());
return ge::INTERNAL_ERROR;
}
@@ -895,7 +921,7 @@ Status ModelBuilder::CompileSingleOp() {
} else {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s,",
node->GetName().c_str(), node->GetType().c_str(), kernel_lib_name.c_str());
GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "[Get][OpsKernelInfoStore] for op %s failed", node->GetName().c_str());
return ge::GE_GRAPH_PARAM_NULLPTR;
}
}
@@ -912,7 +938,7 @@ Status ModelBuilder::CompileSingleOp() {
if (ret != ge::SUCCESS) {
REPORT_CALL_ERROR("E19999", "Batch compile op failed, kernel lib name, node size:%zu,",
node_vector.size());
GELOGE(ret, "Compile op failed, kernel lib name is %s", kernel_lib_name.c_str());
GELOGE(ret, "[Compile][Op] failed, kernel lib name is %s", kernel_lib_name.c_str());
return ret;
}
}
@@ -960,10 +986,10 @@ void ModelBuilder::SetModelCheckAicpuAttr(ge::Model &model, std::set<std::string
compute_graph_->GetName().c_str(), aicpu_op_types.size(), aicpu_optype_list.size(), aicpu_tf_op_types.size(),
aicpu_tf_optype_list.size());
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckCpu", aicpu_optype_list), return,
"Set attr needCheckCpu fail.");
"[Set][Attr] needCheckCpu fail.");

GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckTf", aicpu_tf_optype_list), return,
"Set attr needCheckTf fail.");
"[Set][Attr] needCheckTf fail.");
return;
}
} // namespace ge
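For readers unfamiliar with the logging convention introduced throughout model_builder.cc above: each fix pairs a REPORT_INNER_ERROR call (error code E19999 plus op name/type context) with a GELOGE message that now carries an "[Action][Object]" prefix. The sketch below is a minimal, self-contained illustration of that pairing only; the GELOGE/REPORT_INNER_ERROR macros here are simplified stand-ins rather than the real GraphEngine definitions, and SetDebugFlag is a hypothetical helper, not code from this commit.

```cpp
// Hypothetical stand-ins for the GraphEngine macros; the real GELOGE and
// REPORT_INNER_ERROR are defined in the framework headers, not here.
#include <cstdio>

#define GELOGE(code, fmt, ...) std::fprintf(stderr, "[ERROR] " fmt "\n", ##__VA_ARGS__)
#define REPORT_INNER_ERROR(err_no, fmt, ...) \
  std::fprintf(stderr, "[REPORT %s] " fmt "\n", err_no, ##__VA_ARGS__)

constexpr int FAILED = -1;
constexpr int SUCCESS = 0;

// Illustrative only: mirrors the edited pattern -- report the error with full
// op context first, then log with an "[Action][Object]" prefix, then bail out.
int SetDebugFlag(bool set_ok, const char *attr_name, const char *op_name, const char *op_type) {
  if (!set_ok) {
    REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed for op:%s(%s)", attr_name, op_name, op_type);
    GELOGE(FAILED, "[Set][Attr] %s in model failed", attr_name);
    return FAILED;
  }
  return SUCCESS;
}

int main() { return SetDebugFlag(false, "op_debug_flag", "conv1", "Conv2D") == FAILED ? 0 : 1; }
```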

+ 1  - 1    ge/graph/build/model_builder.h

@@ -93,7 +93,7 @@ class ModelBuilder {

uint64_t session_id_;

map<int64_t, size_t> mem_type_to_mem_offset_;
map<uint64_t, size_t> mem_type_to_mem_offset_;

size_t weight_offset_;



+ 22  - 18    ge/graph/build/run_context.cc

@@ -29,13 +29,15 @@ Status RunContextUtil::InitMemInfo(uint8_t *data_mem_base, uint64_t data_mem_siz
uint64_t weight_mem_size) {
if ((data_mem_size > 0) && (data_mem_base == nullptr)) {
REPORT_INNER_ERROR("E19999", "InitMemInfo param data_mem_base is null but data_mem_size = %lu", data_mem_size);
GELOGE(PARAM_INVALID, "InitMemInfo param data_mem_base is null but data_mem_size = %lu.", data_mem_size);
GELOGE(PARAM_INVALID, "[Check][Param] InitMemInfo param data_mem_base is null but data_mem_size = %lu.",
data_mem_size);
return PARAM_INVALID;
}
if ((weight_mem_size > 0) && (weight_mem_base == nullptr)) {
REPORT_INNER_ERROR("E19999", "InitMemInfo param weight_mem_base is null but weight_mem_size = %lu",
weight_mem_size);
GELOGE(PARAM_INVALID, "InitMemInfo param weight_mem_base is null but weight_mem_size = %lu.", weight_mem_size);
GELOGE(PARAM_INVALID, "[Check][Param] InitMemInfo param weight_mem_base is null but weight_mem_size = %lu.",
weight_mem_size);
return PARAM_INVALID;
}
if (mem_type_to_data_mem_base.empty() || mem_type_to_data_mem_size.empty() ||
@@ -44,9 +46,8 @@ Status RunContextUtil::InitMemInfo(uint8_t *data_mem_base, uint64_t data_mem_siz
"is not equal to the size of mem_type_to_data_mem_size[%zu].",
mem_type_to_data_mem_base.size(), mem_type_to_data_mem_size.size());
GELOGE(PARAM_INVALID,
"InitMemInfo param mem_type_to_data_mem_base size[%zu] is not equal to the size of "
"mem_type_to_data_mem_size[%zu].",
mem_type_to_data_mem_base.size(), mem_type_to_data_mem_size.size());
"[Check][Param] InitMemInfo param mem_type_to_data_mem_base size[%zu] is not equal to the size of "
"mem_type_to_data_mem_size[%zu].", mem_type_to_data_mem_base.size(), mem_type_to_data_mem_size.size());
return PARAM_INVALID;
}
data_mem_base_ = data_mem_base;
@@ -63,7 +64,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
rtError_t rt_ret = rtModelCreate(&rt_model_, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtModelCreate failed, ret:%d,", static_cast<int>(rt_ret));
GELOGE(RT_FAILED, "rtModelCreate failed. rt_ret = %d", static_cast<int>(rt_ret));
GELOGE(RT_FAILED, "[Call][RtModelCreate] failed. rt_ret = %d", static_cast<int>(rt_ret));
return RT_FAILED;
}

@@ -74,7 +75,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtStreamCreate failed, ret:%d, index:%u,",
static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "rtStreamCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "[Call][RtStreamCreate] failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
stream_list_.emplace_back(stream);
@@ -83,7 +84,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtModelBindStream failed, ret:%d, index:%u,",
static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "Bind stream and model failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "[Bind][StreamAndModel] failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
}
@@ -97,7 +98,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtEventCreate failed, ret:%d, index:%u,",
static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "rtEventCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "[Call][RtEventCreate] failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
event_list_.emplace_back(event);
@@ -110,7 +111,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtLabelCreateV2 failed, ret:%d, index:%u,",
static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "rtLabelCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
GELOGE(RT_FAILED, "[Call][RtLabelCreate] failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
label_list_.emplace_back(label);
@@ -162,40 +163,43 @@ Status RunContextUtil::CreateRunContext(Model &model, const ComputeGraphPtr &gra
// check params
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph nullptr, session_id:%lu,", session_id);
GELOGE(PARAM_INVALID, "CreateRunContext param graph is null. session_id=%lu", session_id);
GELOGE(PARAM_INVALID, "[Check][Param] CreateRunContext param graph is null. session_id=%lu", session_id);
return PARAM_INVALID;
}

uint32_t stream_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed for model, session_id:%lu,",
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed from model, session_id:%lu,",
ATTR_MODEL_STREAM_NUM.c_str(), session_id);
GELOGE(INTERNAL_ERROR, "Get stream_num attr from model_def failed. session_id=%lu", session_id);
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s failed from model. session_id=%lu",
ATTR_MODEL_STREAM_NUM.c_str(), session_id);
return INTERNAL_ERROR;
}
GELOGD("Stream_num = %u", stream_num);

uint32_t event_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_EVENT_NUM, event_num)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed for model, session_id:%lu,",
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed from model, session_id:%lu,",
ATTR_MODEL_EVENT_NUM.c_str(), session_id);
GELOGE(INTERNAL_ERROR, "Get event_num attr from model failed. session_id=%lu", session_id);
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s failed from model, session_id:%lu,",
ATTR_MODEL_EVENT_NUM.c_str(), session_id);
return INTERNAL_ERROR;
}
GELOGD("Event_num = %u", event_num);

uint32_t label_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_LABEL_NUM, label_num)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed for model, session_id:%lu,",
REPORT_INNER_ERROR("E19999", "Get Attr:%s failed from model, session_id:%lu,",
ATTR_MODEL_LABEL_NUM.c_str(), session_id);
GELOGE(INTERNAL_ERROR, "Get label_num attr from model failed. session_id=%lu", session_id);
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s failed from model, session_id:%lu,",
ATTR_MODEL_LABEL_NUM.c_str(), session_id);
return INTERNAL_ERROR;
}
GELOGD("Label_num = %u", label_num);

Status ret = CreateRtModelResources(stream_num, event_num, label_num);
if (ret != SUCCESS) {
GELOGE(ret, "CreateRtModelResources failed. session_id=%lu", session_id);
GELOGE(ret, "[Create][RtModelResources] failed. session_id=%lu", session_id);
DestroyRtModelResources();
return ret;
}
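The run_context.cc hunks above apply the same message style to the runtime-resource setup: each rtModelCreate/rtStreamCreate/rtEventCreate/rtLabelCreateV2 call is checked and logged with its return code and loop index before returning. The following sketch shows only that create-and-check loop shape; rt_stream_create and CreateStreams are hypothetical stand-ins for the runtime API and caller, and only the control flow is taken from the diff.

```cpp
// Sketch of the create-and-check loop used in CreateRtModelResources above.
#include <cstdio>
#include <vector>

enum Status { SUCCESS = 0, RT_FAILED = 1 };

// Stand-in for an rtStreamCreate-like call; deliberately fails on index 3.
int rt_stream_create(int *stream, unsigned index) {
  *stream = static_cast<int>(index);
  return index == 3 ? -1 : 0;
}

Status CreateStreams(unsigned stream_num, std::vector<int> &streams) {
  for (unsigned i = 0; i < stream_num; ++i) {
    int stream = 0;
    int rt_ret = rt_stream_create(&stream, i);
    if (rt_ret != 0) {
      // Report the failing return code and index, as the edited logs do.
      std::fprintf(stderr, "[Call][RtStreamCreate] failed. rt_ret = %d, index = %u\n", rt_ret, i);
      return RT_FAILED;  // caller is expected to destroy whatever was created so far
    }
    streams.push_back(stream);
  }
  return SUCCESS;
}

int main() {
  std::vector<int> streams;
  return CreateStreams(5, streams) == RT_FAILED ? 0 : 1;
}
```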


+ 80  - 66    ge/graph/build/stream_allocator.cc

@@ -176,8 +176,8 @@ Status StreamAllocator::AssignLogicalStreams(const std::map<std::string, int> &m

auto gelib = GELib::GetInstance();
if (gelib == nullptr) {
REPORT_INNER_ERROR("E19999", "Check GELib instance nullptr");
GELOGE(FAILED, "Get GELib instance failed.");
REPORT_INNER_ERROR("E19999", "Check GELib instance nullptr, graph:%s", whole_graph_->GetName().c_str());
GELOGE(FAILED, "[Get][Instance] of GELib failed. graph:%s", whole_graph_->GetName().c_str());
return FAILED;
}

@@ -188,7 +188,7 @@ Status StreamAllocator::AssignLogicalStreams(const std::map<std::string, int> &m

Status status = logical_allocator.Assign(whole_graph_, subgraphs_, stream_num_);
if (status != SUCCESS) {
GELOGE(status, "Assign logical streams failed.");
GELOGE(status, "[Assign][LogicalStreams] failed. graph:%s", whole_graph_->GetName().c_str());
return status;
}
GE_DUMP(whole_graph_, "AfterAssignedLogicalStreams");
@@ -203,62 +203,62 @@ Status StreamAllocator::RefreshRealStream(int64_t &stream_num, int64_t &event_nu

Status status = AssignSingleStream();
if (status != SUCCESS) {
GELOGE(status, "AssignSingleStream failed!");
GELOGE(status, "[Assign][SingleStream] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = SetActiveStreamsByLabel();
if (status != SUCCESS) {
GELOGE(status, "SetActiveStreamsByLabel failed!");
GELOGE(status, "[Set][ActiveStreams] By Label failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = SetActiveStreamsForSubgraphs();
if (status != SUCCESS) {
GELOGE(status, "SetActiveStreamsForSubgraphs failed.");
GELOGE(status, "[Set][ActiveStreams] For Subgraphs failed. graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = InsertSyncEvents();
if (status != SUCCESS) {
GELOGE(status, "InsertSyncEventId failed!");
GELOGE(status, "[Insert][SyncEventId] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = OptimizeSyncEvents();
if (status != SUCCESS) {
GELOGE(status, "OptimizeSyncEventId failed!");
GELOGE(status, "[Optimize][SyncEventId] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

vector<set<int64_t>> split_streams(stream_num_);
status = SplitStreams(split_streams);
if (status != SUCCESS) {
GELOGE(status, "SplitStreams failed!");
GELOGE(status, "[Split][Streams] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = UpdateActiveStreams(split_streams);
if (status != SUCCESS) {
GELOGE(status, "UpdateActiveStreams failed!");
GELOGE(status, "[Update][ActiveStreams] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = RefreshContinuousEvents();
if (status != SUCCESS) {
GELOGE(status, "RefreshContinuousEvents failed!");
GELOGE(status, "[Refresh][ContinuousEvents] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = RefreshEventsWithReuse();
if (status != SUCCESS) {
GELOGE(status, "[Refresh][Events]RefreshEventsWithReuse failed!");
GELOGE(status, "[Refresh][Events] With Reuse failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = InsertSyncEventNodes();
if (status != SUCCESS) {
GELOGE(status, "InsertSyncEventNode failed!");
GELOGE(status, "[Insert][SyncEventNode] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

@@ -293,7 +293,7 @@ Status StreamAllocator::AssignSingleStream() {
if (stream_num_ > 1) {
REPORT_INNER_ERROR("E19999", "The number of ts streams is %ld, only one is supported",
stream_num_);
GELOGE(FAILED, "The number of ts streams is %ld, only one is supported.", stream_num_);
GELOGE(FAILED, "[Check][Param] The number of ts streams is %ld, only one is supported.", stream_num_);
return FAILED;
}

@@ -311,7 +311,7 @@ Status StreamAllocator::AssignSingleStream() {
uint32_t max_normal_task_count = 0;
Status status = GetMaxStreamAndTask(false, max_normal_stream_count, max_normal_task_count);
if (status != SUCCESS) {
GELOGE(status, "Get max task count of normal stream failed.");
GELOGE(status, "[Get][MaxCount] of normal stream and task failed. graph:%s", whole_graph_->GetName().c_str());
return status;
}

@@ -369,7 +369,8 @@ Status StreamAllocator::SetActiveStreamsByLabel() {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "SetListInt failed.");
GELOGE(FAILED, "[Set][Attr] %s for op:%s(%s) failed",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(), node->GetName().c_str(), node->GetType().c_str());
return FAILED);
}

@@ -422,7 +423,7 @@ Status StreamAllocator::SetActiveStreamsForSubgraphs() {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
first_active_node->GetName().c_str(), first_active_node->GetType().c_str());
GELOGE(FAILED, "Set active streams for node %s failed.", first_active_node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] active streams for node %s failed.", first_active_node->GetName().c_str());
return FAILED;
}
}
@@ -438,7 +439,7 @@ Status StreamAllocator::InsertSyncEvents() {
NodePtr next_node = peer_in_anchor->GetOwnerNode();
Status status = InsertOneEventInTwoNodes(cur_node, next_node);
if (status != SUCCESS) {
GELOGE(status, "InsertOneEventInTwoNodes failed!");
GELOGE(status, "[Insert][OneEvent] In Two Nodes failed! cur node:%s", cur_node->GetName().c_str());
return status;
}
}
@@ -451,7 +452,7 @@ Status StreamAllocator::InsertSyncEvents() {
NodePtr next_node = peer_in_anchor->GetOwnerNode();
Status status = InsertOneEventInTwoNodes(cur_node, next_node);
if (status != SUCCESS) {
GELOGE(status, "InsertOneEventInTwoNodes failed!");
GELOGE(status, "[Insert][OneEvent] In Two Nodes failed! cur node:%s", cur_node->GetName().c_str());
return status;
}
}
@@ -460,7 +461,8 @@ Status StreamAllocator::InsertSyncEvents() {

Status status = InsertEventsForSubgraph();
if (status != SUCCESS) {
GELOGE(status, "InsertEventsBetweenSubAndParentGraphNodes failed!");
GELOGE(status, "[Insert][Events] Between Sub And Parent GraphNodes failed! graph:%s",
whole_graph_->GetName().c_str());
return status;
}

@@ -493,7 +495,8 @@ Status StreamAllocator::InsertOneEventInTwoNodes(const NodePtr &cur_node, const
if (next_stream_id == kInvalidStream) {
REPORT_INNER_ERROR("E19999", "Stream id of next_node %s(%s) should not be %ld",
next_node->GetName().c_str(), next_node->GetType().c_str(), kInvalidStream);
GELOGE(FAILED, "Stream id of next_node %s should not be %ld", next_node->GetName().c_str(), kInvalidStream);
GELOGE(FAILED, "[Check][Param] Stream id of next_node %s should not be %ld",
next_node->GetName().c_str(), kInvalidStream);
return FAILED;
}

@@ -542,7 +545,7 @@ Status StreamAllocator::InsertEventsForSubgraph() {
for (const auto &next_node : parent_node->GetOutAllNodes()) {
Status status = InsertOneEventInTwoNodes(node, next_node);
if (status != SUCCESS) {
GELOGE(status, "InsertOneEventInTwoNodes failed!");
GELOGE(status, "[Insert][OneEvent] In Two Nodes failed! node:%s", node->GetName().c_str());
return status;
}
}
@@ -566,19 +569,19 @@ Status StreamAllocator::OptimizeSyncEvents() {

Status status = OptimizeBySendEvents(stream_nodes);
if (status != SUCCESS) {
GELOGE(status, "OptimizeBySendEvents failed!");
GELOGE(status, "[Optimize][StreamNodes] By Send Events failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = OptimizeByRecvEvents(stream_nodes);
if (status != SUCCESS) {
GELOGE(status, "OptimizeByRecvEvents failed!");
GELOGE(status, "[Optimize][StreamNodes] By Recv Events failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = OptimizeByStreamActivate();
if (status != SUCCESS) {
GELOGE(status, "OptimizeByStreamActivate failed!");
GELOGE(status, "[Call][OptimizeByStreamActivate] failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}
for (auto pair : node_to_send_events_) {
@@ -708,11 +711,11 @@ Status StreamAllocator::OptimizeByStreamActivate() {
bool StreamAllocator::IsRecvNodeActivatedBySendNode(const NodePtr &send_node_ptr, const NodePtr &recv_node_ptr) const {
GE_CHECK_NOTNULL_EXEC(send_node_ptr->GetOpDesc(),
REPORT_INNER_ERROR("E19999", "Check param send_node_ptr nullptr");
GELOGE(FAILED, "op desc is nullptr");
GELOGE(FAILED, "[Check][Param] op desc is nullptr");
return false);
GE_CHECK_NOTNULL_EXEC(recv_node_ptr->GetOpDesc(),
REPORT_INNER_ERROR("E19999", "Check param recv_node_ptr nullptr");
GELOGE(FAILED, "op desc is nullptr");
GELOGE(FAILED, "[Check][Param] op desc is nullptr");
return false);
auto cur_stream_id = send_node_ptr->GetOpDesc()->GetStreamId();
if (AttrUtils::HasAttr(recv_node_ptr->GetOpDesc(), ATTR_NAME_STREAM_LABEL)) {
@@ -826,7 +829,7 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
uint32_t max_stream_count = 0;
uint32_t max_task_count = 0;
GE_CHK_STATUS_RET(GetMaxStreamAndTask(false, max_stream_count, max_task_count),
"Get max stream and task count failed.");
"[Get][MaxCount] of stream and task failed.");

for (const auto &cur_node : whole_graph_->GetNodes(whole_graph_->GetGraphUnknownFlag())) {
GE_CHECK_NOTNULL(cur_node);
@@ -839,7 +842,7 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
if (stream_id > last_stream_id) {
REPORT_INNER_ERROR("E19999", "streamid(%ld) > last_stream_id(%ld), check invalid",
stream_id, last_stream_id);
GELOGE(FAILED, "SplitStreams:streamid(%ld) > last_stream_id(%ld)", stream_id, last_stream_id);
GELOGE(FAILED, "[Check][Param] SplitStreams:streamid(%ld) > last_stream_id(%ld)", stream_id, last_stream_id);
return FAILED;
}
bool is_stream_first_node = (stream_node_num_vec[stream_id] == 0);
@@ -854,7 +857,7 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
if (stream_continuous_2_node_num_map[continuous_stream_label] > max_node_num_one_stream) {
REPORT_INNER_ERROR("E19999", "Check node[%s] stream_id[%ld] continuous stream label[%s] unsatisfied",
op_desc->GetName().c_str(), stream_id, continuous_stream_label.c_str());
GELOGE(FAILED, "SplitStreams:node[%s] stream_id[%ld] continuous stream label[%s] unsatisfied ",
GELOGE(FAILED, "[Check][Param] SplitStreams:node[%s] stream_id[%ld] continuous stream label[%s] unsatisfied ",
op_desc->GetName().c_str(), stream_id, continuous_stream_label.c_str());
return FAILED;
}
@@ -877,7 +880,8 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
if (HasContinuousStreamLabel(op_desc, cur_continuous_stream_label)) {
// get stored nodes
auto nodes = stream_continuous_2_nodes_map[cur_continuous_stream_label];
GE_RETURN_WITH_LOG_IF_FALSE(!nodes.empty(), "split stream with continuous stream label %s failed",
GE_RETURN_WITH_LOG_IF_FALSE(!nodes.empty(),
"[Check][Param] split stream with continuous stream label %s failed",
cur_continuous_stream_label.c_str());
for (const auto &node : nodes) {
auto stored_op_desc = node->GetOpDesc();
@@ -893,7 +897,7 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
auto iter = std::find(stream_2_nodes_map[stream_id].begin(), stream_2_nodes_map[stream_id].end(), not_cur);
GE_RETURN_WITH_LOG_IF_FALSE(
(iter != stream_2_nodes_map[stream_id].end()) && (iter != stream_2_nodes_map[stream_id].begin()),
"split stream with continuous stream label %s failed", cur_continuous_stream_label.c_str());
"[Check][Param] split stream with continuous stream label %s failed", cur_continuous_stream_label.c_str());
iter--;
pre_node = *iter;
}
@@ -905,7 +909,9 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {

// Add the send/recv event to the first and last nodes of the split stream.
if (pre_node != nullptr) {
GE_CHK_STATUS_RET(AddEventId(pre_node, not_cur, cur_node, not_use_cur), "AddEventId failed.");
GE_CHK_STATUS_RET(AddEventId(pre_node, not_cur, cur_node, not_use_cur),
"[Add][EventId] failed, pre node:%s, not cur node:%s, cur node:%s.",
pre_node->GetName().c_str(), not_cur->GetName().c_str(), cur_node->GetName().c_str());
}
}

@@ -943,12 +949,12 @@ Status StreamAllocator::UpdateActiveStreams(const vector<set<int64_t>> &split_st
for (auto &node : whole_graph_->GetNodes(whole_graph_->GetGraphUnknownFlag())) {
if ((node->GetType() == STREAMSWITCH) || (node->GetType() == STREAMSWITCHN)) {
if (UpdateActiveStreamsForSwitchNode(node) != SUCCESS) {
GELOGE(FAILED, "Update active streams for switch node: %s failed.", node->GetName().c_str());
GELOGE(FAILED, "[Update][ActiveStreams] for switch node: %s failed.", node->GetName().c_str());
return FAILED;
}
} else {
if (UpdateActiveStreamsForActiveNode(split_streams, node) != SUCCESS) {
GELOGE(FAILED, "Update active streams for active node: %s failed.", node->GetName().c_str());
GELOGE(FAILED, "[Update][ActiveStreams] for active node: %s failed.", node->GetName().c_str());
return FAILED;
}
}
@@ -956,13 +962,13 @@ Status StreamAllocator::UpdateActiveStreams(const vector<set<int64_t>> &split_st

Status status = UpdateActiveStreamsForSubgraphs();
if (status != SUCCESS) {
GELOGE(status, "Update active streams for subgraphs failed!");
GELOGE(status, "[Update][ActiveStreams] for subgraphs failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

status = SetActiveStreamsForLoop();
if (status != SUCCESS) {
GELOGE(status, "SetActiveStreamsForLoop failed!");
GELOGE(status, "[Set][ActiveStreams] For Loop failed! graph:%s", whole_graph_->GetName().c_str());
return status;
}

@@ -990,7 +996,7 @@ void StreamAllocator::UpdateLabelStreams(const vector<set<int64_t>> &split_strea
Status StreamAllocator::UpdateActiveStreamsForSwitchNode(NodePtr &switch_node) {
vector<NodePtr> active_nodes;
if (InsertActiveNodesAfterSwitch(switch_node, active_nodes) != SUCCESS) {
GELOGE(FAILED, "Insert active nodes after node %s failed.", switch_node->GetName().c_str());
GELOGE(FAILED, "[Insert][ActiveNodes] after node %s failed.", switch_node->GetName().c_str());
return FAILED;
}
if (active_nodes.empty()) {
@@ -1010,7 +1016,8 @@ Status StreamAllocator::UpdateActiveStreamsForSwitchNode(NodePtr &switch_node) {
if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, stream_ids)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

@@ -1024,21 +1031,21 @@ Status StreamAllocator::InsertActiveNodesAfterSwitch(NodePtr &switch_node, vecto
vector<string> ori_active_label_list;
if (!AttrUtils::GetListStr(switch_desc, ATTR_NAME_ACTIVE_LABEL_LIST, ori_active_label_list) ||
ori_active_label_list.empty()) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail from op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
switch_node->GetName().c_str(), switch_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get active label list of switch %s failed.", switch_node->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][Attr] active label list of switch %s failed.", switch_node->GetName().c_str());
return INTERNAL_ERROR;
}

vector<string> active_label_list;
vector<NodePtr> added_active_nodes;
if (AddActiveNodes(switch_node, ori_active_label_list, active_label_list, added_active_nodes) != SUCCESS) {
GELOGE(FAILED, "Add active nodes after node %s failed.", switch_node->GetName().c_str());
GELOGE(FAILED, "[Add][ActiveNodes] after node %s failed.", switch_node->GetName().c_str());
return FAILED;
}

if (SetActiveLabelList(switch_node, active_label_list) != SUCCESS) {
GELOGE(FAILED, "set active label list failed");
GELOGE(FAILED, "[Set][ActiveLabelList] failed, node:%s", switch_node->GetName().c_str());
return FAILED;
}

@@ -1051,7 +1058,8 @@ Status StreamAllocator::InsertActiveNodesAfterSwitch(NodePtr &switch_node, vecto
if (switch_node->GetOutControlAnchor()->LinkTo(active_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Link from %s to %s failed",
switch_node->GetName().c_str(), active_node->GetName().c_str());
GELOGE(FAILED, "Link %s to %s failed.", switch_node->GetName().c_str(), active_node->GetName().c_str());
GELOGE(FAILED, "[Link][Nodes] from %s to %s failed.",
switch_node->GetName().c_str(), active_node->GetName().c_str());
return FAILED;
}
active_nodes.emplace_back(active_node);
@@ -1068,7 +1076,8 @@ Status StreamAllocator::UpdateActiveStreamsForActiveNode(const vector<set<int64_
if (static_cast<size_t>(logical_stream) >= split_streams.size()) {
REPORT_INNER_ERROR("E19999", "Check logical stream:%u is out of range:%zu",
logical_stream, split_streams.size());
GELOGE(FAILED, "logical stream is out of range.");
GELOGE(FAILED, "[Check][Param] logical stream:%u is out of range(0, %zu).",
logical_stream, split_streams.size());
return FAILED;
}
const set<int64_t> &new_split_streams = split_streams[logical_stream];
@@ -1088,7 +1097,7 @@ Status StreamAllocator::UpdateActiveStreamsForActiveNode(const vector<set<int64_
if (!AttrUtils::SetListInt(node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, new_active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Set active streams for node %s failed.", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] active streams for node %s failed.", node->GetName().c_str());
return FAILED;
}
}
@@ -1130,7 +1139,7 @@ Status StreamAllocator::UpdateActiveStreamsForSubgraphs() const {
if (!AttrUtils::SetListInt(active_op, ATTR_NAME_ACTIVE_STREAM_LIST, active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
active_op->GetName().c_str(), active_op->GetType().c_str());
GELOGE(FAILED, "Set active streams for node %s failed.", active_node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] active streams for node %s failed.", active_node->GetName().c_str());
return FAILED;
}
}
@@ -1200,7 +1209,7 @@ Status StreamAllocator::SetActiveStreamsForLoop() {
if (pre_switch_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Find switch node before loop active node %s fail",
node->GetName().c_str());
GELOGE(FAILED, "find switch node before loop active node %s failed", node->GetName().c_str());
GELOGE(FAILED, "[Find][SwitchNode] before loop active node %s failed", node->GetName().c_str());
return FAILED;
}

@@ -1210,7 +1219,8 @@ Status StreamAllocator::SetActiveStreamsForLoop() {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "SetListInt failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED);
for (const auto &stream_id : loop_active_streams) {
GELOGI("Active stream %u for node: %s.", stream_id, node->GetName().c_str());
@@ -1258,7 +1268,7 @@ Status StreamAllocator::CheckStreamActived() const {
if (iter != active_streams.end()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) cannot active its own stream %u, check invalid ",
node->GetName().c_str(), node->GetType().c_str(), stream_id);
GELOGE(FAILED, "Node %s cannot active its own stream %u.", node->GetName().c_str(), stream_id);
GELOGE(FAILED, "[Check][Param] Node %s cannot active its own stream %u.", node->GetName().c_str(), stream_id);
return FAILED;
}
}
@@ -1376,7 +1386,7 @@ Status StreamAllocator::RefreshContinuousEvents() {
auto find_it = old_to_new_events.find(send_events[i]);
if (find_it == old_to_new_events.end()) {
REPORT_INNER_ERROR("E19999", "Check invalid send event %u", send_events[i]);
GELOGE(FAILED, "RefreshContinuousEvents: invalid send event %u", send_events[i]);
GELOGE(FAILED, "[Check][Param] RefreshContinuousEvents: invalid send event %u", send_events[i]);
return FAILED;
}
send_events[i] = find_it->second;
@@ -1390,7 +1400,7 @@ Status StreamAllocator::RefreshContinuousEvents() {
auto find_it = old_to_new_events.find(recv_events[i]);
if (find_it == old_to_new_events.end()) {
REPORT_INNER_ERROR("E19999", "Check invalid recv event %u", recv_events[i]);
GELOGE(FAILED, "RefreshContinuousEvents: invalid recv event %u", recv_events[i]);
GELOGE(FAILED, "[Check][Param] RefreshContinuousEvents: invalid recv event %u", recv_events[i]);
return FAILED;
}
recv_events[i] = find_it->second;
@@ -1430,7 +1440,8 @@ Status StreamAllocator::InsertSyncEventNodes() {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed, event_id:%u,",
RECV_ATTR_EVENT_ID.c_str(),
node->GetName().c_str(), node->GetType().c_str(), event_id);
GELOGE(FAILED, "SetInt failed.");
GELOGE(FAILED, "[Set][Attr] %s for op:%s(%s) failed, event_id:%u,",
RECV_ATTR_EVENT_ID.c_str(), node->GetName().c_str(), node->GetType().c_str(), event_id);
return FAILED);
(void)AttrUtils::SetListStr(op_desc_ptr, ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES,
std::move(std::vector<std::string>()));
@@ -1441,7 +1452,7 @@ Status StreamAllocator::InsertSyncEventNodes() {
if (status != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Add edge from node %s to node %s failed",
recv_node->GetName().c_str(), node->GetName().c_str());
GELOGE(status, "Add edge for node %s and node %s failed.", recv_node->GetName().c_str(),
GELOGE(status, "[Add][Edge] for node %s and node %s failed.", recv_node->GetName().c_str(),
node->GetName().c_str());
return status;
}
@@ -1478,7 +1489,7 @@ Status StreamAllocator::InsertSyncEventNodes() {
if (status != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Add edge from node %s to node %s failed",
node->GetName().c_str(), send_node->GetName().c_str());
GELOGE(status, "Add edge for node %s and node %s failed.", node->GetName().c_str(),
GELOGE(status, "[Add][Edge] for node %s and node %s failed.", node->GetName().c_str(),
send_node->GetName().c_str());
return status;
}
@@ -1491,7 +1502,8 @@ Status StreamAllocator::InsertSyncEventNodes() {
if (status != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Insert Graph Events fail, graph:%s,",
whole_graph_->GetName().c_str());
GELOGE(status, "Graph ReorderEventNodes failed");
GELOGE(status, "[Insert][GraphEvents] Graph ReorderEventNodes failed, graph:%s,",
whole_graph_->GetName().c_str());
return status;
}

@@ -1544,7 +1556,8 @@ Status StreamAllocator::GetMaxStreamAndTask(bool huge_stream, uint32_t &max_stre
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtGetMaxStreamAndTask fail, ret:%d, stream_type:%u,",
static_cast<int>(ret), stream_type);
GELOGE(FAILED, "Get max stream and task count by rts failed.");
GELOGE(FAILED, "[Call][RtGetMaxStreamAndTask] Get max stream and task count by rts failed, "
"ret:%d, stream_type:%u,", static_cast<int>(ret), stream_type);
return FAILED;
}
GELOGD("Allowed max stream count: %u, max task count per stream: %u.", max_stream_count, max_task_count);
@@ -1687,7 +1700,7 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string
const string &active_label = ori_active_label_list[i];
if (labeled_streams_.find(active_label) == labeled_streams_.end()) {
REPORT_INNER_ERROR("E19999", "can not find stream label:%s", active_label.c_str());
GELOGE(FAILED, "can not find stream label %s", active_label.c_str());
GELOGE(FAILED, "[Check][Param] can not find stream label %s", active_label.c_str());
return FAILED;
}
if (labeled_streams_[active_label].size() <= 1) {
@@ -1715,32 +1728,32 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string
if (switch_node->GetOutControlAnchor()->Unlink(node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Unlink %s to %s failed",
switch_node->GetName().c_str(), node->GetName().c_str());
GELOGE(FAILED, "Unlink %s to %s failed.", switch_node->GetName().c_str(), node->GetName().c_str());
GELOGE(FAILED, "[Unlink][Nodes] %s to %s failed.", switch_node->GetName().c_str(), node->GetName().c_str());
return FAILED;
}
GE_CHECK_NOTNULL(active_node->GetOutControlAnchor());
if (active_node->GetOutControlAnchor()->LinkTo(node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Link %s to %s failed",
active_node->GetName().c_str(), node->GetName().c_str());
GELOGE(FAILED, "Link %s to %s failed.", active_node->GetName().c_str(), node->GetName().c_str());
GELOGE(FAILED, "[Link][Nodes] %s to %s failed.", active_node->GetName().c_str(), node->GetName().c_str());
return FAILED;
}
}

if (SetSwitchBranchNodeLabel(active_node, name) != SUCCESS) {
GELOGE(FAILED, "Set switch branch node label failed.");
GELOGE(FAILED, "[Set][SwitchBranchNodeLabel] failed, node:%s.", active_node->GetName().c_str());
return FAILED;
}
if (SetStreamLabel(active_node, name) != SUCCESS) {
GELOGE(FAILED, "Set stream label failed.");
GELOGE(FAILED, "[Set][StreamLabel] failed, node:%s.", active_node->GetName().c_str());
return FAILED;
}
if (SetActiveLabelList(active_node, {active_label}) != SUCCESS) {
GELOGE(FAILED, "Set active label list failed.");
GELOGE(FAILED, "[Set][ActiveLabelList] failed, node:%s.", active_node->GetName().c_str());
return FAILED;
}
if (SetActiveStreamList(active_node, active_label) != SUCCESS) {
GELOGE(FAILED, "Set active stream list failed.");
GELOGE(FAILED, "[Set][ActiveStreamList] failed, node:%s.", active_node->GetName().c_str());
return FAILED;
}

@@ -1753,7 +1766,7 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string
Status StreamAllocator::SetActiveStreamList(NodePtr &active_node, const string &active_label) {
if (labeled_streams_.find(active_label) == labeled_streams_.end()) {
REPORT_INNER_ERROR("E19999", "Can not find stream label:%s", active_label.c_str());
GELOGE(FAILED, "Can not find stream label %s.", active_label.c_str());
GELOGE(FAILED, "[Check][Param] Can not find stream label %s.", active_label.c_str());
return FAILED;
}
set<int64_t> &streams = labeled_streams_[active_label];
@@ -1761,7 +1774,8 @@ Status StreamAllocator::SetActiveStreamList(NodePtr &active_node, const string &
if (!AttrUtils::SetListInt(active_node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
active_node->GetName().c_str(), active_node->GetType().c_str());
GELOGE(FAILED, "SetListInt of %s failed.", ATTR_NAME_ACTIVE_STREAM_LIST.c_str());
GELOGE(FAILED, "[Set][Attr] %s failed for op:%s(%s).", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
active_node->GetName().c_str(), active_node->GetType().c_str());
return FAILED;
}
return SUCCESS;
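The RefreshRealStream changes above append the graph name to every failure log in a long chain of passes that each short-circuit on error. Below is a compact sketch of that early-return chaining under assumed names: RunStreamPasses and the pass lambdas are illustrative only and are not part of StreamAllocator.

```cpp
// Minimal sketch of the early-return pass chaining in RefreshRealStream above,
// with the graph name appended to every failure log as the edited code now does.
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

using Status = int;
constexpr Status SUCCESS = 0;

Status RunStreamPasses(const std::string &graph_name) {
  const std::vector<std::pair<const char *, std::function<Status()>>> passes = {
      {"[Assign][SingleStream]", [] { return SUCCESS; }},
      {"[Insert][SyncEventId]", [] { return SUCCESS; }},
      {"[Split][Streams]", [] { return 1; }},  // simulate a failing pass
  };
  for (const auto &pass : passes) {
    Status status = pass.second();
    if (status != SUCCESS) {
      std::fprintf(stderr, "%s failed! graph:%s\n", pass.first, graph_name.c_str());
      return status;  // stop at the first failing pass, like the real chain
    }
  }
  return SUCCESS;
}

int main() { return RunStreamPasses("demo_graph") == SUCCESS ? 1 : 0; }
```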


+ 5  - 7    ge/graph/build/stream_graph_optimizer.cc

@@ -128,8 +128,8 @@ Status StreamGraphOptimizer::OptimizeStreamedSubGraph(const ComputeGraphPtr &com
REPORT_INNER_ERROR("E19999", "Check stream_id:%ld in op:%s(%s) is bigger than "
"run_context.graphStreamList.size():%zu", stream_id, op_desc->GetName().c_str(),
op_desc->GetType().c_str(), run_context.graphStreamList.size());
GELOGE(FAILED, "stream_id %ld is bigger than run_context.graphStreamList.size() %zu", stream_id,
run_context.graphStreamList.size());
GELOGE(FAILED, "[Check][Param] stream_id %ld is bigger than run_context.graphStreamList.size() %zu",
stream_id, run_context.graphStreamList.size());
return FAILED;
}
run_context.stream = run_context.graphStreamList[stream_id];
@@ -145,11 +145,9 @@ Status StreamGraphOptimizer::OptimizeStreamedSubGraph(const ComputeGraphPtr &com
REPORT_CALL_ERROR("E19999", "Call optimize streamed subgraph failed, subgraph: %s, engine_name: %s, graph "
"Optimizer num: %zu, ret: %u", subgraph->GetName().c_str(), engine_name.c_str(),
graph_optimizers.size(), ret);
GELOGE(
ret,
"[optimizeStreamedSubGraph]: optimize streamed subgraph failed, subgraph: %s, engine_name: %s, graph "
"Optimizer num: %zu, ret: %u",
subgraph->GetName().c_str(), engine_name.c_str(), graph_optimizers.size(), ret);
GELOGE(ret, "[Optimize][StreamGraph] failed, subgraph: %s, engine_name: %s, graph "
"Optimizer num: %zu, ret: %u",
subgraph->GetName().c_str(), engine_name.c_str(), graph_optimizers.size(), ret);
return ret;
}
GELOGD(


+ 75  - 60    ge/graph/build/task_generator.cc

@@ -72,7 +72,7 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
// Check params
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph is null, session_id:%lu", session_id);
GELOGE(PARAM_INVALID, "GetTaskInfo param graph is null. session_id=%lu", session_id);
GELOGE(PARAM_INVALID, "[Check][Param] GetTaskInfo param graph is null. session_id=%lu", session_id);
return PARAM_INVALID;
}

@@ -83,7 +83,7 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
GE_DUMP(graph, "GenerateTaskAfter");

if (ret != SUCCESS) {
GELOGE(ret, "GenerateTask failed. session_id=%lu", session_id);
GELOGE(ret, "[Generate][Task] failed. session_id=%lu", session_id);
return ret;
}

@@ -98,7 +98,8 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(model, ATTR_MODEL_TASK_INDEX_OP_NAME, op_name),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s",
ATTR_MODEL_TASK_INDEX_OP_NAME.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetListStr failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for model:%s",
ATTR_MODEL_TASK_INDEX_OP_NAME.c_str(), model.GetName().c_str());
return FAILED);

GELOGI("GenerateTask Success, task list:%zu, op map:%zu, logic mem base:%p, logic weight base:%p, logic var base:%p",
@@ -113,7 +114,8 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
if (task_def == nullptr) {
REPORT_INNER_ERROR("E19999", "Add task_def in ModelTaskDef fail, session_id:%lu, graph:%s, model:%s",
session_id, graph->GetName().c_str(), model.GetName().c_str());
GELOGE(FAILED, "task_def is nullptr.");
GELOGE(FAILED, "[Check][Param] task_def is nullptr, session_id:%lu, graph:%s, model:%s",
session_id, graph->GetName().c_str(), model.GetName().c_str());
return FAILED;
}
*task_def = task_def_temp;
@@ -121,7 +123,7 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t

ret = AddModelTaskToModel(model_task_def, session_id, model, run_context);
if (ret != SUCCESS) {
GELOGE(ret, "AddModelTaskToModel failed. session_id=%lu", session_id);
GELOGE(ret, "[Add][ModelTask] To Model failed. session_id=%lu", session_id);
return ret;
}

@@ -135,28 +137,33 @@ Status TaskGenerator::AddModelTaskToModel(const ModelTaskDef &model_task_def, ui
AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_BASE_ADDR, reinterpret_cast<uintptr_t>(run_context.dataMemBase)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s",
MODEL_ATTR_TASK_GEN_BASE_ADDR.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_BASE_ADDR failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for model:%s",
MODEL_ATTR_TASK_GEN_BASE_ADDR.c_str(), model.GetName().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(
AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, reinterpret_cast<uintptr_t>(run_context.weightMemBase)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s",
MODEL_ATTR_TASK_GEN_WEIGHT_ADDR.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_WEIGHT_ADDR failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for model:%s",
MODEL_ATTR_TASK_GEN_WEIGHT_ADDR.c_str(), model.GetName().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_TASK_GEN_VAR_ADDR, reinterpret_cast<uintptr_t>(var_mem_base_)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s",
ATTR_MODEL_TASK_GEN_VAR_ADDR.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetInt ATTR_MODEL_TASK_GEN_VAR_ADDR failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for model:%s",
ATTR_MODEL_TASK_GEN_VAR_ADDR.c_str(), model.GetName().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_VAR_SIZE, var_mem_size_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s",
ATTR_MODEL_VAR_SIZE.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetInt ATTR_MODEL_VAR_SIZE failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for model:%s",
ATTR_MODEL_VAR_SIZE.c_str(), model.GetName().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, MODEL_ATTR_SESSION_ID, session_id),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for mode:%s",
MODEL_ATTR_SESSION_ID.c_str(), model.GetName().c_str());
GELOGE(FAILED, "SetInt MODEL_ATTR_SESSION_ID failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for mode:%s",
MODEL_ATTR_SESSION_ID.c_str(), model.GetName().c_str());
return FAILED);

size_t task_size = model_task_def.ByteSizeLong();
@@ -164,15 +171,15 @@ Status TaskGenerator::AddModelTaskToModel(const ModelTaskDef &model_task_def, ui
if (!model_task_def.SerializePartialToArray(serial_buff.GetData(), static_cast<int>(task_size))) {
REPORT_INNER_ERROR("E19999", "model_task_def's serialize failed, model name = %s, task_size=%zu",
model.GetName().c_str(), task_size);
GELOGE(FAILED, "model_task_def's serialize failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
task_size);
GELOGE(FAILED, "[Call][SerializePartialToArray] failed, model name = %s, task_size=%zu.",
model.GetName().c_str(), task_size);
return FAILED;
}
if (!AttrUtils::SetZeroCopyBytes(model, MODEL_ATTR_TASKS, std::move(serial_buff))) {
REPORT_INNER_ERROR("E19999", "Set model task to model failed, model name = %s, task_size=%zu",
model.GetName().c_str(), task_size);
GELOGE(FAILED, "Set model task to model failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
task_size);
GELOGE(FAILED, "[Call][SetZeroCopyBytes] Set model task to model failed, model name = %s, task_size=%zu.",
model.GetName().c_str(), task_size);
return FAILED;
}

@@ -191,7 +198,8 @@ Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t sessi
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsInputVar, input_var),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", kIsInputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "SetListBool failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", kIsInputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
}

@@ -204,7 +212,8 @@ Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t sessi
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsOutputVar, output_var),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", kIsOutputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "SetListBool failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", kIsOutputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
}
return SUCCESS;
@@ -280,10 +289,11 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
std::shared_ptr<GELib> ge_lib = GELib::GetInstance();
if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GenerateTask failed.");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Check][Param] GenerateTask failed, because GELib instance not init before.");
return GE_CLI_GE_NOT_INITIALIZED;
}
GE_CHK_STATUS_RET(MarkNodeAndSetIndex(graph), "MarkNodeAndSetIndex failed.");
GE_CHK_STATUS_RET(MarkNodeAndSetIndex(graph),
"[Call][MarkNodeAndSetIndex] failed, graph:%s.", graph->GetName().c_str());
ProfilingPoint profiling_point;
vector<uint32_t> all_reduce_nodes;
GE_CHK_STATUS_RET(FindProfilingTaskIndex(graph, profiling_point, all_reduce_nodes));
@@ -304,12 +314,13 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
rtStream_t stream = nullptr;
bool is_unknown_shape = graph->GetGraphUnknownFlag() || GetContext().GetHostExecFlag();
if (is_unknown_shape) {
GE_CHK_STATUS_RET(SetUnknownShapeStream(run_context, stream), "Set unknown shape stream failed.");
GE_CHK_STATUS_RET(SetUnknownShapeStream(run_context, stream),
"[Set][UnknownShapeStream] failed, graph:%s.", graph->GetName().c_str());
}
std::function<void()> callback = [&]() {
if (is_unknown_shape) {
if (DestroyUnknownShapeStream(run_context, stream) != SUCCESS) {
GELOGE(FAILED, "Destory unknown shape stream failed.");
GELOGE(FAILED, "[Destroy][UnknownShapeStream] failed.");
}
}
};
@@ -335,7 +346,7 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
FusionTaskInfo{run_context, graph, node, op_desc, node_index, ge_lib,
ops_kernel_manager, task_def_list, op_name_map, profiling_point, all_reduce_nodes};
GE_CHK_STATUS_RET(GenerateTaskForFusionNode(fusion_task_info, fusion_nodes, fusion_nodes_seen),
"Call GenerateTaskForFusionNode node:%s(%s) failed", name.c_str(), type.c_str());
"[Call][GenerateTaskForFusionNode] node:%s(%s) failed", name.c_str(), type.c_str());
// continue directly
if (ge::AttrUtils::GetInt(op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key)) {
GELOGI("Fusion node[name:%s, type:%s] do not need generate task again.", name.c_str(), type.c_str());
@@ -349,13 +360,11 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
if (kernel_info_store == nullptr) {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s",
node->GetName().c_str(), node->GetType().c_str(), op_kernel_lib_name.c_str());
GELOGE(INTERNAL_ERROR,
"No ops kernel store or ops kernel builder found. node:%s(%s), op_kernel_lib_name=%s.",
name.c_str(),
type.c_str(), op_kernel_lib_name.c_str());
GELOGE(INTERNAL_ERROR, "[Call][GetOpsKernelInfoStore] No ops kernel store or ops kernel builder found. "
"node:%s(%s), op_kernel_lib_name=%s.", name.c_str(), type.c_str(), op_kernel_lib_name.c_str());
return INTERNAL_ERROR;
}
GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "Call UpdateAnchorStatus node:%s(%s) failed", name.c_str(),
GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "[Call][UpdateAnchorStatus] node:%s(%s) failed", name.c_str(),
type.c_str());
// Profiling task
size_t task_list_size_before = task_def_list.size();
@@ -365,7 +374,8 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
int64_t stream_id = 0;
if (!is_unknown_shape) {
stream_id = op_desc->GetStreamId();
GE_CHK_STATUS_RET(SetKnownShapeStream(run_context, stream_id), "node[name:%s(%s), id:%ld] stream id is invalid.",
GE_CHK_STATUS_RET(SetKnownShapeStream(run_context, stream_id),
"[Set][KnownShapeStream] node[name:%s(%s), id:%ld] stream id is invalid.",
name.c_str(), type.c_str(), op_id);
}
GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task.", op_kernel_lib_name.c_str(),
@@ -376,8 +386,7 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call OpsKernelBuilderManager GenerateTask fail for op:%s(%s)",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(ret, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task failed.",
op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id);
GELOGE(ret, "[Generate][Task] fail for op:%s(%s)", node->GetName().c_str(), node->GetType().c_str());
return ret;
}
// Profiling task
@@ -388,9 +397,9 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
REPORT_INNER_ERROR("E19999", "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task "
"but task num from %zu to %zu, check invalid", op_kernel_lib_name.c_str(), name.c_str(),
type.c_str(), op_id, stream_id, task_list_size_before, task_list_size_after);
GELOGE(FAILED, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id, task_list_size_before,
task_list_size_after);
GELOGE(FAILED, "[Check][Param] Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task. "
"but task num from %zu to %zu.", op_kernel_lib_name.c_str(), name.c_str(), type.c_str(),
op_id, stream_id, task_list_size_before, task_list_size_after);
return FAILED;
}

@@ -455,15 +464,15 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_kernel_lib_name.c_str());
GELOGE(INTERNAL_ERROR,
"Fusion: No ops kernel store or ops kernel builder found. fusion_node:%s(%s), op_kernel_lib_name=%s.",
GELOGE(INTERNAL_ERROR, "[Call][GetOpsKernelInfoStore] Fusion: No ops kernel store or ops kernel builder found."
" fusion_node:%s(%s), op_kernel_lib_name=%s.",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str());
return INTERNAL_ERROR;
}

ret = UpdateAnchorStatus(fusion_node);
if (ret != SUCCESS) {
GELOGE(ret, "Fusion: Call UpdateAnchorStatus fusion_node:%s(%s) failed", fusion_node_name.c_str(),
GELOGE(ret, "[Update][AnchorStatus] fusion_node:%s(%s) failed", fusion_node_name.c_str(),
fusion_node_type.c_str());
return ret;
}
@@ -474,8 +483,9 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
REPORT_INNER_ERROR("E19999", "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, "
"stream list size=%zu", fusion_node_name.c_str(), fusion_node_type.c_str(),
op_id, run_context.graphStreamList.size());
GELOGE(INTERNAL_ERROR, "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, stream list size=%zu",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, run_context.graphStreamList.size());
GELOGE(INTERNAL_ERROR, "[Check][Param] Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, "
"stream list size=%zu", fusion_node_name.c_str(), fusion_node_type.c_str(), op_id,
run_context.graphStreamList.size());
return INTERNAL_ERROR;
}
// profiling task
@@ -488,8 +498,7 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
REPORT_CALL_ERROR("E19999", " Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task failed", op_kernel_lib_name.c_str(),
fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
GELOGE(ret,
"Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
GELOGE(ret, "[Generate][Task] Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task failed.",
op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
return ret;
@@ -503,8 +512,7 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
"id:%ld, stream_id:%ld] task, but task num from %zu to %zu, check invalid",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str(),
op_id, stream_id, task_list_size_before, task_list_size_after);
GELOGE(FAILED,
"Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
GELOGE(FAILED, "[Check][Param] Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
task_list_size_before, task_list_size_after);
@@ -539,16 +547,18 @@ Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
if (NodeUtils::SetAllAnchorStatus(node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "SetAllAnchorStatus fail for op:%s(%s)",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "NodeUtils::SetAllAnchorStatus failed.");
GELOGE(INTERNAL_ERROR, "[Set][AllAnchorStatus] failed, op:%s(%s)",
node->GetName().c_str(), node->GetType().c_str());
return INTERNAL_ERROR;
}
for (auto &anchor : node->GetAllInDataAnchors()) {
auto peer_anchor = anchor->GetPeerOutAnchor();
if (peer_anchor == nullptr) {
if (AnchorUtils::SetStatus(anchor, ANCHOR_SUSPEND) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in peer anchor status fail for op:%s(%s), anchor_index:%d,",
REPORT_CALL_ERROR("E19999", "Set in peer anchor status fail for op:%s(%s), anchor_index:%d",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
GELOGE(INTERNAL_ERROR, "[Set][Status] failed, op:%s(%s), anchor_index:%d",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
return INTERNAL_ERROR;
}
continue;
@@ -558,16 +568,18 @@ Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
bool is_const = NodeUtils::GetConstOpType(peer_anchor->GetOwnerNode(), const_type);
if (is_const && (const_type == CONSTANT)) {
if (AnchorUtils::SetStatus(anchor, ANCHOR_CONST) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in anchor CONST status fail for op:%s(%s), anchor_index:%d,",
REPORT_CALL_ERROR("E19999", "Set in anchor CONST status fail for op:%s(%s), anchor_index:%d",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
GELOGE(INTERNAL_ERROR, "[Set][Status] failed. op:%s(%s), anchor_index:%d.",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
return INTERNAL_ERROR;
}
} else {
if (AnchorUtils::SetStatus(anchor, ANCHOR_DATA) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in anchor DATA status fail for op:%s(%s), anchor_index:%d,",
REPORT_CALL_ERROR("E19999", "Set in anchor DATA status fail for op:%s(%s), anchor_index:%d",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
GELOGE(INTERNAL_ERROR, "[Set][Status] failed, op:%s(%s), anchor_index:%d.",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx());
return INTERNAL_ERROR;
}
}
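
UpdateAnchorStatus walks every input anchor and tags it SUSPEND when it has no peer, CONST when the peer is a constant, and DATA otherwise; the reworded messages just carry the op name and anchor index along. A standalone restatement of that three-way decision (the constant type string here is an assumption, not the real CONSTANT value):

#include <cassert>
#include <string>

// Stand-ins for the GE anchor status values; illustrative only.
enum AnchorStatus { ANCHOR_SUSPEND, ANCHOR_CONST, ANCHOR_DATA };

// Mirrors the decision in UpdateAnchorStatus: no peer -> SUSPEND,
// peer is a constant op -> CONST, anything else -> DATA.
AnchorStatus ClassifyInAnchor(bool has_peer, const std::string &peer_type) {
  if (!has_peer) {
    return ANCHOR_SUSPEND;
  }
  if (peer_type == "Constant") {  // placeholder for the CONSTANT type string
    return ANCHOR_CONST;
  }
  return ANCHOR_DATA;
}

int main() {
  assert(ClassifyInAnchor(false, "") == ANCHOR_SUSPEND);
  assert(ClassifyInAnchor(true, "Constant") == ANCHOR_CONST);
  assert(ClassifyInAnchor(true, "MatMul") == ANCHOR_DATA);
  return 0;
}
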
@@ -580,7 +592,7 @@ Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
auto ge_lib = GELib::GetInstance();
if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GE is not initialized or is finalized.");
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Check][Param] GE is not initialized or is finalized.");
return GE_CLI_GE_NOT_INITIALIZED;
}

@@ -588,7 +600,7 @@ Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
if (all_nodes.empty()) {
REPORT_INNER_ERROR("E19999", "Check param all_nodes empty in graph:%s",
graph->GetName().c_str());
GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "Graph's node is empty");
GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[Check][Param] Graph's node is empty, graph:%s", graph->GetName().c_str());
return GE_GRAPH_GRAPH_NODE_NULL;
}

@@ -612,7 +624,7 @@ Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
for (const auto &stream_ops : all_stream_ops) {
Status status = MarkFirstAndLastOps(stream_ops.second, is_single_stream);
if (status != SUCCESS) {
GELOGE(status, "Mark first and last nodes failed.");
GELOGE(status, "[Mark][FirstAndLastOps] failed, graph:%s.", graph->GetName().c_str());
return status;
}
}
@@ -644,9 +656,8 @@ Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_
string op_kernel_lib_name = op_desc->GetOpKernelLibName();
if (op_kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_kernel_lib_name.c_str());
GELOGE(INTERNAL_ERROR, "node:%s(%s) get op kernel lib failed.", op_desc->GetName().c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_kernel_lib_name.c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] node:%s(%s) get op kernel lib failed.", op_desc->GetName().c_str(),
op_desc->GetType().c_str());
return INTERNAL_ERROR;
}
@@ -664,12 +675,14 @@ Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.first, kIsFirstNode, true),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", kIsFirstNode,
op_pair.first->GetName().c_str(), op_pair.first->GetType().c_str());
GELOGE(FAILED, "SetBool failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", kIsFirstNode,
op_pair.first->GetName().c_str(), op_pair.first->GetType().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.second, kIsLastNode, true),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", kIsLastNode,
op_pair.second->GetName().c_str(), op_pair.second->GetType().c_str());
GELOGE(FAILED, "SetBool failed.");
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", kIsLastNode,
op_pair.second->GetName().c_str(), op_pair.second->GetType().c_str());
return FAILED);
}
}
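
MarkFirstAndLastOps pairs up, per kernel lib, the first and last op it sees and sets the kIsFirstNode/kIsLastNode attributes on them. A rough sketch of that bookkeeping under simplified assumptions (plain strings stand in for OpDescPtr, and per-stream grouping is omitted):

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  // (kernel lib name, op name) pairs in execution order.
  std::vector<std::pair<std::string, std::string>> ops = {
      {"AIcoreEngine", "conv1"}, {"AIcoreEngine", "relu1"},
      {"DNN_VM_GE_LOCAL", "identity"}, {"AIcoreEngine", "conv2"}};
  // For each kernel lib, remember the first and last op seen.
  std::map<std::string, std::pair<std::string, std::string>> first_last;
  for (const auto &op : ops) {
    auto it = first_last.find(op.first);
    if (it == first_last.end()) {
      first_last[op.first] = {op.second, op.second};  // first == last so far
    } else {
      it->second.second = op.second;                  // update last op
    }
  }
  // The real code would now set kIsFirstNode / kIsLastNode on these pairs.
  for (const auto &kv : first_last) {
    std::cout << kv.first << ": first=" << kv.second.first
              << " last=" << kv.second.second << "\n";
  }
  return 0;
}
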
@@ -980,7 +993,8 @@ Status TaskGenerator::InsertProfilingArTaskBefore(const OpDescPtr &op_desc, std:
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
GELOGE(FAILED, "[Check][Param] Multiply result is out of range. node:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
ar_log_id = i * kProfilingArStep + kProfilingArStartLogid;
is_insert_all_reduce_task = true;
@@ -1074,7 +1088,8 @@ Status TaskGenerator::InsertProfilingArTaskAfter(const OpDescPtr &op_desc, std::
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
GELOGE(FAILED, "[Check][Param] Multiply result is out of range. node:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED);
ar_log_id = i * kProfilingArStep + kProfilingArEndLogid;
is_insert_all_reduce_task = true;
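
Both profiling hunks compute an all-reduce log id as i * kProfilingArStep plus a start or end base, guarding the multiply with CheckUint64MulOverflow first. A small arithmetic sketch of that guard (the step and base values below are made-up placeholders, not the real constants):

#include <cstdint>
#include <iostream>
#include <limits>

constexpr uint64_t kStep = 2;      // placeholder for kProfilingArStep
constexpr uint64_t kEndLogid = 4;  // placeholder for kProfilingArEndLogid

// Same idea as TypeUtils::CheckUint64MulOverflow: refuse i * kStep
// when the product would wrap around uint64_t.
bool MulOverflows(uint64_t a, uint64_t b) {
  return a != 0 && b > std::numeric_limits<uint64_t>::max() / a;
}

int main() {
  uint64_t i = 3;  // index of the all-reduce node in the profiling list
  if (MulOverflows(i, kStep)) {
    std::cerr << "multiply result is out of range\n";
    return 1;
  }
  uint64_t ar_log_id = i * kStep + kEndLogid;  // 3 * 2 + 4 = 10
  std::cout << "ar_log_id = " << ar_log_id << "\n";
  return 0;
}
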
@@ -1184,7 +1199,7 @@ Status TaskGenerator::SetUnknownShapeStream(RunContext &run_context, rtStream_t
rtError_t rt_ret = rtModelBindStream(run_context.model, stream, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelBindStream failed, ret:0x%X", rt_ret);
GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(FAILED, "[Call][RtModelBindStream] failed, ret: 0x%X", rt_ret);
GE_CHK_RT_RET(rtStreamDestroy(stream));
return FAILED;
}
@@ -1199,7 +1214,7 @@ Status TaskGenerator::DestroyUnknownShapeStream(RunContext &run_context, rtStrea

Status TaskGenerator::SetKnownShapeStream(RunContext &run_context, int64_t stream_id) {
if (stream_id < 0 || stream_id >= static_cast<int64_t>(run_context.graphStreamList.size())) {
GELOGE(INTERNAL_ERROR, "Stream id[%ld] is invalid, stream list size=%zu", stream_id,
GELOGE(INTERNAL_ERROR, "[Check][Param] Stream id[%ld] is invalid, stream list size=%zu", stream_id,
run_context.graphStreamList.size());
return INTERNAL_ERROR;
}


+ 2
- 2
ge/graph/common/bcast.cc

@@ -37,7 +37,7 @@ Status BCast::GenerateBcastInfo(const kVecInt &sx, const kVecInt &sy) {
Reverse(x);
Reverse(y);
ExtendTensorDim(x, y);
GE_RETURN_WITH_LOG_IF_ERROR(SetShapeDifferentInfo(x, y), "GenerateBcastInfo failed.");
GE_RETURN_WITH_LOG_IF_ERROR(SetShapeDifferentInfo(x, y), "[Set][ShapeDifferentInfo] GenerateBcastInfo failed.");
}
ReverseAllIntermediateShapes();
return domi::SUCCESS;
@@ -76,7 +76,7 @@ Status BCast::SetShapeDifferentInfo(const kVecInt &x, const kVecInt &y) {
REPORT_INNER_ERROR("E19999", "SetShapeDifferentInfo failed. Two tensor shapes are not compatible "
"according to the broadcasting rule.");
GELOGE(domi::PARAM_INVALID,
"SetShapeDifferentInfo failed. Two tensor shapes are not compatible "
"[Check][Param] SetShapeDifferentInfo failed. Two tensor shapes are not compatible "
"according to the broadcasting rule.");
return domi::PARAM_INVALID;
}
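
The broadcasting rule this message refers to is presumably the standard NumPy/TensorFlow one: after the shorter shape is extended with leading 1s, every aligned dimension pair must either match or contain a 1. A hedged standalone sketch of that compatibility check, with plain vectors standing in for BCast::kVecInt:

#include <cstdint>
#include <iostream>
#include <vector>

// Not the BCast class itself; a restatement of the compatibility rule only.
bool BroadcastCompatible(std::vector<int64_t> x, std::vector<int64_t> y) {
  // Pad the shorter shape with leading 1s (ExtendTensorDim analogue).
  while (x.size() < y.size()) x.insert(x.begin(), 1);
  while (y.size() < x.size()) y.insert(y.begin(), 1);
  for (size_t i = 0; i < x.size(); ++i) {
    if (x[i] != y[i] && x[i] != 1 && y[i] != 1) {
      return false;
    }
  }
  return true;
}

int main() {
  std::cout << BroadcastCompatible({8, 1, 6}, {7, 1}) << "\n";  // 1: compatible
  std::cout << BroadcastCompatible({3, 4}, {2, 4}) << "\n";     // 0: incompatible
  return 0;
}
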


+ 57
- 13
ge/graph/common/omg_util.cc

@@ -16,9 +16,6 @@

#include "graph/common/omg_util.h"

#include <algorithm>

#include "framework/common/debug/ge_log.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/tensor_utils.h"
@@ -38,9 +35,10 @@ Status GetOriginalType(const ge::NodePtr &node, string &type) {
GE_CHECK_NOTNULL(node->GetOpDesc());
bool ret = ge::AttrUtils::GetStr(node->GetOpDesc(), ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, type);
if (!ret) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail for op:%s(%s)", ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail from op:%s(%s)", ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get FrameWorkOp original type [%s]", type.c_str());
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s fail from op:%s(%s)", ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return INTERNAL_ERROR;
}
GELOGD("Get FrameWorkOp original type [%s]", type.c_str());
@@ -61,7 +59,8 @@ Status SetStreamLabel(const ge::NodePtr &node, const std::string &label) {
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_STREAM_LABEL, label)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_STREAM_LABEL failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -80,7 +79,8 @@ Status SetCycleEvent(const ge::NodePtr &node) {
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_STREAM_CYCLE_EVENT_FLAG, true)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_STREAM_CYCLE_EVENT_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_STREAM_CYCLE_EVENT_FLAG failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_CYCLE_EVENT_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -100,7 +100,8 @@ Status SetActiveLabelList(const ge::NodePtr &node, const std::vector<std::string
if (!AttrUtils::SetListStr(tmp_desc, ge::ATTR_NAME_ACTIVE_LABEL_LIST, active_label_list)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_ACTIVE_LABEL_LIST failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -120,7 +121,8 @@ Status SetSwitchBranchNodeLabel(const ge::NodePtr &node, const std::string &bran
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_SWITCH_BRANCH_NODE_LABEL, branch_label)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_SWITCH_BRANCH_NODE_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_SWITCH_BRANCH_NODE_LABEL failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_SWITCH_BRANCH_NODE_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -140,7 +142,8 @@ Status SetSwitchTrueBranchFlag(const ge::NodePtr &node, bool value) {
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, value)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -160,7 +163,8 @@ Status SetOriginalNodeName(const ge::NodePtr &node, const std::string &orig_name
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_ORIG_NODE_NAME, orig_name)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_ORIG_NODE_NAME failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -179,7 +183,8 @@ Status SetCyclicDependenceFlag(const ge::NodePtr &node) {
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_CYCLIC_DEPENDENCE_FLAG, true)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_CYCLIC_DEPENDENCE_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_CYCLIC_DEPENDENCE_FLAG failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_CYCLIC_DEPENDENCE_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -200,7 +205,8 @@ Status SetNextIteration(const ge::NodePtr &node, const std::string &next) {
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_NEXT_ITERATION, next)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Op: %s set ATTR_NAME_NEXT_ITERATION failed", node->GetName().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -244,4 +250,42 @@ Status GetMemorySize(const NodePtr &node, int64_t &output_size) {
output_size = kBufferPoolMemAlignSize + size + kBufferPoolMemAlignSize;
return SUCCESS;
}

///
/// @brief Check Is Unknown shape Tensor
/// @param [in] tensor_desc
/// @return true: Unknown / false: Known
///
bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc) {
const static int kUnknowShape = -1;
const static int kUnknowRank = -2;
for (auto dim_size : tensor_desc.GetShape().GetDims()) {
if (dim_size == kUnknowShape || dim_size == kUnknowRank) {
return true;
}
}

return false;
}

///
/// @brief Set Op _force_unknown_shape flag
/// @param [in] node
/// @param [in] force_unknown, set attribute if true
/// @return
///
void MarkForceUnknownShape(const NodePtr &node, bool force_unknown) {
GE_RT_VOID_CHECK_NOTNULL(node);
if (!force_unknown) {
return;
}

GELOGD("[%s] mark as force unknown shape node", node->GetName().c_str());
if (!AttrUtils::SetBool(node->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, force_unknown)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_FORCE_UNKNOWN_SHAPE.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_FORCE_UNKNOWN_SHAPE.c_str(),
node->GetName().c_str(), node->GetType().c_str());
}
}
} // namespace ge
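
The new IsUnknownShapeTensor helper scans the dims for -1 (unknown size) or -2 (unknown rank), and MarkForceUnknownShape sets ATTR_NAME_FORCE_UNKNOWN_SHAPE only when asked to. A standalone restatement of the dim scan, with a plain vector standing in for GeShape:

#include <cstdint>
#include <iostream>
#include <vector>

// A dim of -1 (unknown size) or -2 (unknown rank) marks the whole
// tensor as unknown-shape, matching the helper added above.
bool HasUnknownDim(const std::vector<int64_t> &dims) {
  constexpr int64_t kUnknownDim = -1;
  constexpr int64_t kUnknownRank = -2;
  for (int64_t d : dims) {
    if (d == kUnknownDim || d == kUnknownRank) {
      return true;
    }
  }
  return false;
}

int main() {
  std::cout << HasUnknownDim({4, 16, 16}) << "\n";   // 0: fully static
  std::cout << HasUnknownDim({-1, 16, 16}) << "\n";  // 1: dynamic batch
  std::cout << HasUnknownDim({-2}) << "\n";          // 1: unknown rank
  return 0;
}
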

+ 15
- 0
ge/graph/common/omg_util.h

@@ -117,6 +117,21 @@ void AlignMemSize(int64_t &mem_size, int64_t align_size);
/// @return Status
///
Status GetMemorySize(const NodePtr &node, int64_t &output_size);

///
/// @brief Check Is Unknown shape Tensor
/// @param [in] tensor_desc
/// @return true: Unknown / false: Known
///
bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc);

///
/// @brief Set Op _force_unknown_shape flag
/// @param [in] node
/// @param [in] force_unknown, set attribute if true
/// @return
///
void MarkForceUnknownShape(const NodePtr &node, bool force_unknown);
} // namespace ge

#endif // GE_GRAPH_COMMON_OMG_UTIL_H_

+ 143
- 63
ge/graph/execute/graph_execute.cc

@@ -43,7 +43,7 @@ GraphExecutor::~GraphExecutor() {
rt_ret = rtFreeHost(buffer_addr);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[GraphManager] subgraph free buffer failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtFreeHost] subgraph free buffer failed, ret: 0x%X", rt_ret);
}
}
}
@@ -55,17 +55,17 @@ Status GraphExecutor::SetCondition(std::mutex *mutex, std::condition_variable *c
std::shared_ptr<GraphModelListener> listener) {
if (mutex == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param mutex nullptr");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param mutex is nullptr.");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] input param mutex is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
if (cond == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param cond nullptr");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param cond is nullptr.");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] input param cond is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
if (listener == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param listener nullptr");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param listener is nullptr.");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] input param listener is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}

@@ -82,7 +82,7 @@ Status GraphExecutor::SetCondition(std::mutex *mutex, std::condition_variable *c
Status GraphExecutor::SetGraphContext(GraphContextPtr graph_context_ptr) {
if (graph_context_ptr == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph_context_ptr nullptr");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetGraphContext] input param graph_context_ptr is nullptr");
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] input param graph_context_ptr is nullptr");
return GE_GRAPH_PARAM_NULLPTR;
}
graph_context_ = graph_context_ptr;
@@ -94,7 +94,7 @@ Status GraphExecutor::SetDynamicSize(uint32_t model_id, const std::vector<uint64
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->SetDynamicSize(model_id, batch_num, dynamic_type);
if (ret != SUCCESS) {
GELOGE(ret, "SetDynamicSize failed");
GELOGE(ret, "[Set][DynamicSize] failed, model_id:%u", model_id);
return ret;
}
return SUCCESS;
@@ -109,7 +109,7 @@ Status GraphExecutor::FreeInOutBuffer() {
rt_ret = rtFreeHost(*iter);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[GraphManager] subgraph free buffer failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtFreeHost] subgraph free buffer failed, ret: 0x%X", rt_ret);
(void)buffer_addr_.erase(buffer_addr_.begin(), iter);
return GE_GRAPH_FREE_FAILED;
}
@@ -144,7 +144,7 @@ Status GraphExecutor::MallocInOutBuffer(const std::vector<uint64_t> &buffer_size
buffer_size_.clear();
auto rt_ret = FreeInOutBuffer();
if (rt_ret != SUCCESS) {
GELOGE(RT_FAILED, "[SubGraphInfo] MallocInOutBuffer free buffer failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Free][Buffer] failed, ret: 0x%X", rt_ret);
return RT_FAILED;
}
}
@@ -154,9 +154,8 @@ Status GraphExecutor::MallocInOutBuffer(const std::vector<uint64_t> &buffer_size
void *tmp_buf = nullptr;
rt_ret = rtMallocHost(&tmp_buf, buffer_size[i]);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMallocHost failed, size:%lu, ret:0x%X",
buffer_size[i], rt_ret);
GELOGE(RT_FAILED, "[GraphManager] subgraph malloc buffer failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMallocHost failed, size:%lu, ret:0x%X", buffer_size[i], rt_ret);
GELOGE(RT_FAILED, "[Malloc][Buffer] failed, size:%lu, ret:0x%X", buffer_size[i], rt_ret);
return GE_GRAPH_MALLOC_FAILED;
}
malloc_flag_ = true;
@@ -190,7 +189,7 @@ Status GraphExecutor::PrepareInputData(const std::vector<GeTensor> &input_tensor

Status ret = MallocInOutBuffer(bufferSizeVec, addrVec);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_MALLOC_FAILED, "[GraphExecutor] Malloc mem failed");
GELOGE(GE_GRAPH_MALLOC_FAILED, "[Malloc][Mem] failed");
return GE_GRAPH_MALLOC_FAILED;
}

@@ -203,7 +202,8 @@ Status GraphExecutor::PrepareInputData(const std::vector<GeTensor> &input_tensor
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, dst_size:%lu, src_size:%zu, ret:0x%X",
bufferSizeVec[i], in_tensor->GetData().size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, dst_size:%lu, src_size:%zu, ret:0x%X",
bufferSizeVec[i], in_tensor->GetData().size(), rt_ret);
return RT_FAILED;
}
}
@@ -247,7 +247,7 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
GELOGI("[ExecuteGraph] GetInputOutputDescInfo via new ome begin.");
Status ret = GetInputOutputDescInfo(model_id, inputs_desc, output_desc);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_GET_IN_OUT_FAILED, "[GraphExecutor] GetInputOutputDescInfo failed, modelId=%u.", model_id);
GELOGE(GE_GRAPH_GET_IN_OUT_FAILED, "[Get][InputOutputDescInfo] failed, modelId=%u.", model_id);
return GE_GRAPH_GET_IN_OUT_FAILED;
}
outputs_desc_.assign(output_desc.begin(), output_desc.end());
@@ -257,14 +257,13 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
input_data.model_id = model_id;
ret = PrepareInputData(input_tensor, input_data, output_data, output_desc);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_PREPARE_FAILED, "[GraphExecutor] PrepareInputData failed, modelId=%u.", model_id);
GELOGE(GE_GRAPH_PREPARE_FAILED, "[Prepare][InputData] failed, modelId=%u.", model_id);
return GE_GRAPH_PREPARE_FAILED;
}

if (graph_run_listener_->ResetResult() != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call graph_run_listener_.ResetResult fail, model_id:%u",
model_id);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Reset result failed");
REPORT_CALL_ERROR("E19999", "Call graph_run_listener_.ResetResult fail, model_id:%u", model_id);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[Reset][Result] failed, model_id:%u", model_id);
return GE_GRAPH_EXECUTE_FAILED;
}

@@ -272,7 +271,7 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
GELOGI("[ExecuteGraph] DataInput via new ome begin.");
ret = DataInput(input_data, output_data);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_DATA_INPUT_FAILED, "[GraphExecutor] push data failed, modelId=%u.", model_id);
GELOGE(GE_GRAPH_DATA_INPUT_FAILED, "[Call][DataInput] push data failed, modelId=%u.", model_id);
return GE_GRAPH_DATA_INPUT_FAILED;
}
GELOGI("[GraphExecutor] input data push to wrapper finish, waiting for result...");
@@ -287,10 +286,8 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
// Run graph return
uint32_t result_code = graph_run_listener_->GetResultCode();
if (result_code != SUCCESS && result_code != END_OF_SEQUENCE) {
REPORT_CALL_ERROR("E19999", "Graph_run_listener_ run fail, result:%u, model_id:%u",
result_code, model_id);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[GraphExecutor] execute model failed, ret=%u, modelId=%u.", result_code,
model_id);
REPORT_CALL_ERROR("E19999", "Graph_run_listener_ run fail, result:%u, model_id:%u", result_code, model_id);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[Execute][Model] failed, ret=%u, modelId=%u.", result_code, model_id);
return GE_GRAPH_EXECUTE_FAILED;
}
}
@@ -299,13 +296,13 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
CHECK_FALSE_EXEC(outputDataTmp.length != 0,
REPORT_INNER_ERROR("E19999", "Param output_data.length is 0 in model:%u, check invalid",
model_id);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Failed to allocate memory, length is 0.");
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[Check][Param] Failed to allocate memory, "
"length is 0, model id:%u", model_id);
return GE_GRAPH_EXECUTE_FAILED);
std::unique_ptr<uint8_t> outBufTmp(new (std::nothrow) uint8_t[outputDataTmp.length]);
if (outBufTmp == nullptr) {
REPORT_CALL_ERROR("E19999", "New output buffer fail, length:%lu, model:%u",
outputDataTmp.length, model_id);
GELOGE(FAILED, "Failed to allocate memory.");
REPORT_CALL_ERROR("E19999", "New output buffer fail, length:%lu, model:%u", outputDataTmp.length, model_id);
GELOGE(FAILED, "[Allocate][Memory] failed, length:%lu, model:%u", outputDataTmp.length, model_id);
return FAILED;
}
GE_PRINT_DYNAMIC_MEMORY(new, "the output memory of data on training.", sizeof(uint8_t) * outputDataTmp.length)
@@ -314,7 +311,8 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
CHECK_FALSE_EXEC(ret_value == RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, dst_size:%lu, src_size:%zu, ret:0x%X",
outputDataTmp.length, outputDataTmp.length, ret_value);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Call rt api rtMemcpy failed, ret: 0x%X", ret);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[Call][RtMemcpy] failed, dst_size:%lu, src_size:%zu, ret:0x%X",
outputDataTmp.length, outputDataTmp.length, ret_value);
return GE_GRAPH_EXECUTE_FAILED);
GeTensor outTensor;
std::vector<int64_t> shapeDims;
@@ -348,7 +346,7 @@ void GraphExecutor::InitModelIdInfo(std::vector<uint32_t> &out_model_id_info,
Status GraphExecutor::FreeExecuteMemory() {
auto ret = FreeInOutBuffer();
if (ret != SUCCESS) {
GELOGE(ret, "[FreeExecuteMemory] FreeInOutBuffer Error!");
GELOGE(ret, "[Free][InOutBuffer] Error!");
return ret;
}

@@ -368,13 +366,14 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro
if (!init_flag_) {
REPORT_INNER_ERROR("E19999", "No SetCondition called before, graph:%u, check invalid",
graph_id);
GELOGE(GE_GRAPH_EXECUTE_NOT_INIT, "[GraphExecutor] AI Core Engine without calling SetCondition!");
GELOGE(GE_GRAPH_EXECUTE_NOT_INIT, "[Check][Param] AI Core Engine without calling SetCondition! graph id:%u",
graph_id);
return GE_GRAPH_EXECUTE_NOT_INIT;
}
GE_CHECK_NOTNULL_EXEC(ge_root_model, return FAILED);
Status ret = SyncExecuteModel(ge_root_model->GetModelId(), input_tensor, output_tensor);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[GraphExecutor] SyncExecuteModel Error!");
GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[SyncExecute][Model] Error! graph id:%u", graph_id);
return GE_GRAPH_SYNC_MODEL_FAILED;
}

@@ -382,7 +381,7 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro
}

Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
const std::vector<InputTensorInfo> &input_tensor,
const std::vector<ge::Tensor> &input_tensor,
const RunAsyncCallback& callback) {
GELOGI("[GraphExecutor] Start to async execute graph, graph_id=%u", graph_id);
if (graph_id != last_graph_id_) {
@@ -395,7 +394,7 @@ Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &
GE_CHECK_NOTNULL_EXEC(ge_root_model, return FAILED);
Status ret = AsyncExecuteModel(ge_root_model, input_tensor, callback);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[GraphExecutor] AsyncExecuteModel Error!");
GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[AsyncExecute][Model] Error! graph id:%u", graph_id);
return GE_GRAPH_SYNC_MODEL_FAILED;
}

@@ -403,6 +402,73 @@ Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &
return SUCCESS;
}

Status GraphExecutor::GetExecuteData(const std::vector<GeTensor> &input_tensor, std::vector<DataBuffer> &blobs,
std::vector<GeTensorDesc> &tensor_desc) {
for (const auto &tensor : input_tensor) {
DataBuffer in_data_buf;
// check placement
in_data_buf.data = const_cast<uint8_t *>(tensor.GetData().data());
in_data_buf.length = tensor.GetData().size();
in_data_buf.isDataSupportMemShare = false;
blobs.emplace_back(in_data_buf);
tensor_desc.emplace_back(tensor.GetTensorDesc());
}
return SUCCESS;
}

Status GraphExecutor::ExecuteGraphWithStream(GraphId graph_id,
rtStream_t stream,
const GeRootModelPtr &ge_root_model,
const std::vector<GeTensor> &input_tensor,
std::vector<GeTensor> &output_tensor) {
GELOGI("[GraphExecutor] Start to execute graph with stream, graph id = %u, stream = %p.", graph_id, stream);
if (!init_flag_) {
REPORT_INNER_ERROR("E19999", "No SetCondition called before, graph id = %u, stream = %p, check invalid.",
graph_id, stream);
GELOGE(GE_GRAPH_EXECUTE_NOT_INIT, "[Check][Param] AI Core Engine without calling SetCondition! graph id = %u",
graph_id);
return GE_GRAPH_EXECUTE_NOT_INIT;
}

if (graph_id != last_graph_id_) {
auto ret = FreeExecuteMemory();
if (ret != SUCCESS) {
return ret;
}
}
last_graph_id_ = graph_id;

GE_CHECK_NOTNULL_EXEC(ge_root_model, return FAILED);
auto model_id = ge_root_model->GetModelId();
InputData input_data;
input_data.index = 0;
input_data.model_id = model_id;
std::vector<GeTensorDesc> input_desc;
auto ret = GetExecuteData(input_tensor, input_data.blobs, input_desc);
if (ret != SUCCESS) {
return ret;
}
OutputData output_data;
output_data.index = 0;
output_data.model_id = model_id;
std::vector<GeTensorDesc> output_desc;
ret = GetExecuteData(output_tensor, output_data.blobs, output_desc);
if (ret != SUCCESS) {
return ret;
}

auto async_mode = true;
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
ret = model_manager->ExecuteModel(model_id, stream, async_mode, input_data, input_desc, output_data, output_desc);
if (ret != SUCCESS) {
return ret;
}

GELOGI("[GraphExecutor] Async execute graph with stream success graph id = %u, stream = %p.", graph_id, stream);
return SUCCESS;
}
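
ExecuteGraphWithStream packs the caller's tensors into InputData/OutputData blobs via GetExecuteData and then hands them, together with the caller's stream, to ModelManager::ExecuteModel in async mode. A simplified sketch of that packing step, with stand-in structs in place of the real DataBuffer/InputData definitions:

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-ins only; the real GE structs carry more fields.
struct DataBuffer { const uint8_t *data; size_t length; };
struct IoData { uint32_t index = 0; uint32_t model_id = 0; std::vector<DataBuffer> blobs; };

// Analogue of GetExecuteData: wrap each tensor's bytes without copying.
void PackTensors(const std::vector<std::vector<uint8_t>> &tensors, IoData &io) {
  for (const auto &t : tensors) {
    io.blobs.push_back({t.data(), t.size()});
  }
}

int main() {
  std::vector<std::vector<uint8_t>> inputs = {{1, 2, 3, 4}, {5, 6}};
  IoData input_data;
  input_data.model_id = 42;  // hypothetical model id
  PackTensors(inputs, input_data);
  std::cout << "packed " << input_data.blobs.size() << " input blobs\n";
  // A real call would then pass input_data plus an rtStream_t to
  // ModelManager::ExecuteModel with async_mode = true.
  return 0;
}
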

bool CompareByLoad(const Uint32Pair &lhs, const Uint32Pair &rhs) {
return lhs.second < rhs.second;
}
@@ -449,21 +515,21 @@ Status GraphExecutor::SetCallback(uint32_t model_id, const GeRootModelPtr &ge_ro
auto model = model_manager->GetHybridModel(model_id);
GE_CHECK_NOTNULL(model);
if (model->SetRunAsyncListenerCallback(callback) != SUCCESS) {
GELOGE(FAILED, "SetRunAsyncListenerCallback failed.");
GELOGE(FAILED, "[Set][RunAsyncListenerCallback] failed, model_id %u", model_id);
return FAILED;
}
} else {
auto model = model_manager->GetModel(model_id);
GE_CHECK_NOTNULL(model);
if (model->SetRunAsyncListenerCallback(callback) != SUCCESS) {
GELOGE(FAILED, "SetRunAsyncListenerCallback failed.");
GELOGE(FAILED, "[Set][RunAsyncListenerCallback] failed, model_id %u", model_id);
return FAILED;
}
}
return SUCCESS;
}

Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<InputTensorInfo> &inputs,
Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<ge::Tensor> &inputs,
const RunAsyncCallback &callback) {
uint32_t model_id = GetExecuteModelId(ge_root_model);
if (model_id == kInvalidModelId) {
@@ -475,24 +541,24 @@ Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, con
GE_CHECK_NOTNULL(model_manager);
GELOGI("RunAsync begin.model_id %u", model_id);
if (SetCallback(model_id, ge_root_model, callback) != SUCCESS) {
GELOGE(FAILED, "RunAsync: SetCallBack for model fail");
GELOGE(FAILED, "[Set][CallBack] for model fail, model_id %u", model_id);
return FAILED;
}

Status ret = model_manager->DataInputTensor(model_id, inputs);
if (ret != SUCCESS) {
GELOGE(ret, "RunAsync: DataInput fail");
GELOGE(ret, "[Call][DataInputTensor] RunAsync: DataInput fail, model_id %u", model_id);
return ret;
}

GELOGI("RunAsync success.");
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed");
GELOGE(MEMALLOC_FAILED, "RunAsync failed, bad memory allocation occur !");
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed, model_id %u", model_id);
GELOGE(MEMALLOC_FAILED, "[Run][Async] failed, bad memory allocation occur, model_id %u", model_id);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed");
GELOGE(FAILED, "RunAsync failed, some exceptions occur !");
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed, model_id %u", model_id);
GELOGE(FAILED, "[Run][Async] failed, some exceptions occur, model_id %u", model_id);
return FAILED;
}

@@ -505,16 +571,16 @@ Status GraphExecutor::DataInput(const InputData &input_data, OutputData &output_
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->DataInput(input_data, output_data);
if (ret != SUCCESS) {
GELOGE(ret, "DataInput: DataInput failed.");
GELOGE(ret, "[Call][DataInput] failed.");
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed");
GELOGE(MEMALLOC_FAILED, "DataInput failed, bad memory allocation occur !");
GELOGE(MEMALLOC_FAILED, "[Call][DataInput] failed, bad memory allocation occur !");
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed");
GELOGE(FAILED, "DataInput failed, some exceptions occur !");
GELOGE(FAILED, "[Call][DataInput] failed, some exceptions occur !");
return FAILED;
}
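
DataInput and the GetInputOutputDescInfo overloads all share the same guard: delegate to ModelManager inside a try block and translate std::bad_alloc, or any other exception, into a status code rather than letting it escape. A minimal sketch of that pattern (the status values below are placeholders):

#include <iostream>
#include <new>
#include <vector>

enum Status { SUCCESS = 0, FAILED = 1, MEMALLOC_FAILED = 2 };

Status GuardedCall() {
  try {
    std::vector<int> buffer(16);  // the real code calls into ModelManager here
    (void)buffer;
    return SUCCESS;
  } catch (std::bad_alloc &) {
    std::cerr << "bad memory allocation occur\n";
    return MEMALLOC_FAILED;
  } catch (...) {
    std::cerr << "some exceptions occur\n";
    return FAILED;
  }
}

int main() { return GuardedCall(); }
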

@@ -528,16 +594,16 @@ Status GraphExecutor::GetInputOutputDescInfo(const uint32_t model_id, vector<Inp
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetInputOutputDescInfo(model_id, input_desc, output_desc);
if (ret != SUCCESS) {
GELOGE(ret, "GetInputOutputDescInfo failed.");
GELOGE(ret, "[Get][InputOutputDescInfo] failed, model_id:%u.", model_id);
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed");
GELOGE(MEMALLOC_FAILED, "GetInputOutputDescInfo failed, bad memory allocation occur !");
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed, model_id:%u.", model_id);
GELOGE(MEMALLOC_FAILED, "[Get][InputOutputDescInfo] failed, bad memory allocation occur, model_id:%u.", model_id);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed");
GELOGE(FAILED, "GetInputOutputDescInfo failed, some exceptions occur !");
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed, model_id:%u.", model_id);
GELOGE(FAILED, "[Get][InputOutputDescInfo] failed, some exceptions occur, model_id:%u.", model_id);
return FAILED;
}

@@ -554,16 +620,16 @@ Status GraphExecutor::GetInputOutputDescInfo(const uint32_t model_id, vector<Inp
Status ret = model_manager->GetInputOutputDescInfo(model_id, input_desc, output_desc, input_formats, out_formats,
new_model_desc);
if (ret != SUCCESS) {
GELOGE(ret, "GetInputOutputDescInfo failed.");
GELOGE(ret, "[Get][InputOutputDescInfo] failed, model_id:%u.", model_id);
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed");
GELOGE(MEMALLOC_FAILED, "GetInputOutputDescInfo failed, bad memory allocation occur !");
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur failed, model_id:%u.", model_id);
GELOGE(MEMALLOC_FAILED, "[Get][InputOutputDescInfo] failed, bad memory allocation occur, model_id:%u.", model_id);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed");
GELOGE(FAILED, "GetInputOutputDescInfo failed, some exceptions occur !");
REPORT_INNER_ERROR("E19999", "Some exceptions occur failed, model_id:%u.", model_id);
GELOGE(FAILED, "[Get][InputOutputDescInfo] failed, some exceptions occur, model_id:%u.", model_id);
return FAILED;
}

@@ -583,7 +649,7 @@ Status GraphExecutor::GetDynamicBatchInfo(uint32_t model_id, std::vector<std::ve
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetDynamicBatchInfo(model_id, batch_info, dynamic_type);
if (ret != SUCCESS) {
GELOGE(ret, "GetDynamicBatchInfo failed.");
GELOGE(ret, "[Get][DynamicBatchInfo] failed, model_id:%u.", model_id);
return ret;
}
return SUCCESS;
@@ -601,7 +667,7 @@ Status GraphExecutor::GetCombinedDynamicDims(uint32_t model_id, std::vector<std:
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetCombinedDynamicDims(model_id, batch_info);
if (ret != SUCCESS) {
GELOGE(ret, "GetCombinedDynamicDims failed.");
GELOGE(ret, "[Call][GetCombinedDynamicDims] failed, model_id:%u.", model_id);
return ret;
}
return SUCCESS;
@@ -620,7 +686,7 @@ ge::Status GraphExecutor::GetUserDesignateShapeOrder(uint32_t model_id,
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetUserDesignateShapeOrder(model_id, user_input_shape_order);
if (ret != SUCCESS) {
GELOGE(ret, "GetUserDesignateShapeOrder failed.");
GELOGE(ret, "[Get][UserDesignateShapeOrder] failed, model_id:%u.", model_id);
return ret;
}
return SUCCESS;
@@ -631,7 +697,20 @@ Status GraphExecutor::GetCurShape(const uint32_t model_id, std::vector<int64_t>
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetCurShape(model_id, batch_info, dynamic_type);
if (ret != SUCCESS) {
GELOGE(ret, "GetCurShape failed");
GELOGE(ret, "[Get][CurShape] failed, model_id:%u", model_id);
return ret;
}
return SUCCESS;
}

Status GraphExecutor::GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name,
std::string &attr_value) {
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetOpAttr(model_id, op_name, attr_name, attr_value);
if (ret != SUCCESS) {
GELOGE(ret, "[Get][OpAttr]Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str());
REPORT_CALL_ERROR("E19999", "Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str());
return ret;
}
return SUCCESS;
@@ -642,7 +721,7 @@ Status GraphExecutor::GetModelAttr(uint32_t model_id, std::vector<string> &dynam
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetModelAttr(model_id, dynamic_output_shape_info);
if (ret != SUCCESS) {
GELOGE(FAILED, "GetModelAttr failed");
GELOGE(FAILED, "[Get][ModelAttr] failed, model_id:%u", model_id);
return ret;
}
return SUCCESS;
@@ -675,7 +754,7 @@ Status GraphExecutor::GetOrigInputInfo(uint32_t model_id, uint32_t index, Origin
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetOrigInputInfo(model_id, index, orig_input_info);
if (ret != SUCCESS) {
GELOGE(ret, "GetOrigInputInfo failed.");
GELOGE(ret, "[Get][OrigInputInfo] failed, model_id:%u, index:%u.", model_id, index);
return ret;
}

@@ -689,7 +768,7 @@ Status GraphExecutor::GetAllAippInputOutputDims(uint32_t model_id, uint32_t inde
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetAllAippInputOutputDims(model_id, index, input_dims, output_dims);
if (ret != SUCCESS) {
GELOGE(ret, "GetAllAippInputOutputDims failed.");
GELOGE(ret, "[Get][AllAippInputOutputDims] failed, model_id:%u, index:%u.", model_id, index);
return ret;
}

@@ -702,7 +781,8 @@ Status GraphExecutor::GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetOpDescInfo(device_id, stream_id, task_id, op_desc_info);
if (ret != SUCCESS) {
GELOGE(ret, "GetOpDescInfo failed.");
GELOGE(ret, "[Get][OpDescInfo] failed, device_id:%u, stream_id:%u, task_id:%u.",
device_id, stream_id, task_id);
return ret;
}
return SUCCESS;


+ 14
- 2
ge/graph/execute/graph_execute.h

@@ -50,7 +50,13 @@ class GraphExecutor {
std::vector<GeTensor> &output_tensor);

ge::Status ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
const std::vector<InputTensorInfo> &input_tensor, const RunAsyncCallback &callback);
const std::vector<ge::Tensor> &input_tensor, const RunAsyncCallback &callback);

Status ExecuteGraphWithStream(GraphId graph_id,
rtStream_t stream,
const GeRootModelPtr &ge_root_model,
const std::vector<GeTensor> &input_tensor,
std::vector<GeTensor> &output_tensor);

Status SetCondition(std::mutex *mutex, std::condition_variable *cond, std::shared_ptr<GraphModelListener> listener);

@@ -108,6 +114,9 @@ class GraphExecutor {

static Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);

static Status GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name,
std::string &attr_value);

static Status GetModelAttr(uint32_t model_id, std::vector<string> &dynamic_output_shape_info);

static Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info);
@@ -122,10 +131,13 @@ class GraphExecutor {
Status PrepareInputData(const std::vector<GeTensor> &input_tensor, InputData &graph_input_data,
OutputData &graph_output_data, std::vector<InputOutputDescInfo> &output_desc);

Status GetExecuteData(const std::vector<GeTensor> &input_tensor, std::vector<DataBuffer> &blobs,
std::vector<GeTensorDesc> &tensor_desc);

Status SyncExecuteModel(uint32_t model_id, const std::vector<GeTensor> &input_tensor,
std::vector<GeTensor> &output_tensor);

Status AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<InputTensorInfo> &input_tensor,
Status AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<ge::Tensor> &input_tensor,
const RunAsyncCallback &callback);

void InitModelIdInfo(std::vector<uint32_t> &out_model_id_info, std::vector<SubGraphInfoPtr> &sub_graph_vec,


+ 13
- 9
ge/graph/label/case_label_maker.cc

@@ -44,8 +44,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (graph_names.empty() || graph_names.size() > kMaxCaseBranch) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph size: %zu, check invalid", case_desc->GetName().c_str(),
case_desc->GetType().c_str(), graph_names.size());
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, graph size: %zu.", case_desc->GetName().c_str(),
graph_names.size());
GELOGE(INTERNAL_ERROR, "[Check][Param] Node: %s has invalid subgraph, graph size: %zu.",
case_desc->GetName().c_str(), graph_names.size());
return FAILED;
}

@@ -71,7 +71,7 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph: %s failed.", graph->GetName().c_str());
return FAILED;
}

@@ -81,7 +81,7 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (label == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail",
graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Call][AddLabelSetEnter] Subgraph: %s add label set failed.", graph->GetName().c_str());
return FAILED;
}
switch_labels.emplace_back(curr_label_index);
@@ -96,7 +96,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (AddLabelGotoLeave(graph, label_goto_name, last_label_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail",
graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Call][AddLabelGotoLeave] Subgraph: %s add label goto failed.",
graph->GetName().c_str());
return FAILED;
}
} else {
@@ -105,7 +106,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (AddLabelSetLeave(graph, last_label_name, last_label_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail",
graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Call][AddLabelSetLeave] Subgraph: %s add label set failed.",
graph->GetName().c_str());
return FAILED;
}
}
@@ -122,7 +124,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchEnter node in graph:%s fail",
first_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", first_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Call][AddLabelSwitchEnter] Subgraph: %s add label switch failed.",
first_graph->GetName().c_str());
return FAILED;
}

@@ -130,7 +133,7 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (GraphUtils::AddEdge(switch_node->GetOutControlAnchor(), first_label->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", switch_node->GetName().c_str(),
first_label->GetName().c_str(), first_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add ctrl edge to %s failed.", first_label->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] to %s failed.", first_label->GetName().c_str());
return FAILED;
}

@@ -139,7 +142,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
if (AddLabelSwitchIndex(first_graph, data_name, pred_desc, switch_node, parent_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchIndex node in graph:%s fail",
first_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add switch input failed.", first_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Call][AddLabelSwitchIndex] Subgraph: %s add switch input failed.",
first_graph->GetName().c_str());
return FAILED;
}



+ 10
- 10
ge/graph/label/if_label_maker.cc

@@ -47,7 +47,7 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
"then branch graph: %s, else branch graph: %s",
if_desc->GetName().c_str(), if_desc->GetType().c_str(),
then_branch_name.c_str(), else_branch_name.c_str());
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, then branch: %s, else branch: %s.",
GELOGE(INTERNAL_ERROR, "[Check][Param] Node: %s has invalid subgraph, then branch: %s, else branch: %s.",
if_desc->GetName().c_str(), then_branch_name.c_str(), else_branch_name.c_str());
return FAILED;
}
@@ -72,7 +72,7 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (then_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph:%s failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

@@ -80,14 +80,14 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (then_enter_label == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail",
then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetEnter] in Subgraph:%s failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

if (AddLabelGotoLeave(then_sub_graph, then_leave_name, else_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail",
then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelGotoLeave] in Subgraph:%s failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

@@ -95,20 +95,20 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (else_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
else_stream_active->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", else_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph:%s failed.", else_sub_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(else_sub_graph, else_enter_name, else_enter_index, else_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail",
else_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", else_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetEnter] in Subgraph:%s failed.", else_sub_graph->GetName().c_str());
return FAILED;
}
if (AddLabelSetLeave(else_sub_graph, else_leave_name, else_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail",
else_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", else_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetLeave] in Subgraph:%s failed.", else_sub_graph->GetName().c_str());
return FAILED;
}

@@ -121,7 +121,7 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchEnter node in graph:%s fail",
then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSwitchEnter] in Subgraph:%s failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

@@ -129,7 +129,7 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (GraphUtils::AddEdge(switch_node->GetOutControlAnchor(), then_enter_label->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", switch_node->GetName().c_str(),
then_enter_label->GetName().c_str(), then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add ctrl edge to %s failed.", then_enter_label->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] to %s failed.", then_enter_label->GetName().c_str());
return FAILED;
}

@@ -138,7 +138,7 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
if (AddLabelSwitchIndex(then_sub_graph, data_name, pred_desc, switch_node, parent_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchIndex node in graph:%s fail",
then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add switch input failed.", then_sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSwitchIndex] in Subgraph:%s failed.", then_sub_graph->GetName().c_str());
return FAILED;
}



+ 21
- 17
ge/graph/label/label_maker.cc

@@ -58,7 +58,7 @@ void LabelMaker::LinkToGraphHead(const ComputeGraphPtr &graph, const NodePtr &no
if (GraphUtils::AddEdge(node->GetOutControlAnchor(), n->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", node->GetName().c_str(),
n->GetName().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", node->GetName().c_str(), n->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] from %s to %s failed.", node->GetName().c_str(), n->GetName().c_str());
}
}
}
@@ -82,7 +82,7 @@ void LabelMaker::LinkToGraphTail(const ComputeGraphPtr &graph, const NodePtr &no
if (GraphUtils::AddEdge(tail->GetOutControlAnchor(), node->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", tail->GetName().c_str(),
node->GetName().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", tail->GetName().c_str(), node->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] from %s to %s failed.", tail->GetName().c_str(), node->GetName().c_str());
}
return;
}
@@ -101,7 +101,7 @@ NodePtr LabelMaker::AddStreamActive(const ComputeGraphPtr &graph, const std::str
const auto &node_list = graph->GetDirectNode();
if (node_list.empty()) {
REPORT_INNER_ERROR("E19999", "Check param graph:%s has no node", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSet: Graph %s node is empty.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] LabelSet: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}

@@ -137,7 +137,7 @@ NodePtr LabelMaker::AddLabelSetEnter(const ComputeGraphPtr &graph, const std::st
const auto &node_list = graph->GetDirectNode();
if (node_list.empty()) {
REPORT_INNER_ERROR("E19999", "Check param graph:%s has no node", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSet: Graph %s node is empty.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] LabelSet: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}

@@ -153,7 +153,7 @@ NodePtr LabelMaker::AddLabelSetEnter(const ComputeGraphPtr &graph, const std::st
if (GraphUtils::AddEdge(label_set->GetOutControlAnchor(), stream_active->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", label_set->GetName().c_str(),
stream_active->GetName().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", label_set->GetName().c_str(),
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] from %s to %s failed.", label_set->GetName().c_str(),
stream_active->GetName().c_str());
return nullptr;
}
@@ -202,7 +202,7 @@ NodePtr LabelMaker::AddLabelGotoEnter(const ComputeGraphPtr &graph, const std::s
auto it = node_list.begin();
if (it == node_list.end()) {
REPORT_INNER_ERROR("E19999", "Check param graph:%s has no node", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelGoto: Graph %s node is empty.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] LabelGoto: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}

@@ -216,7 +216,7 @@ NodePtr LabelMaker::AddLabelGotoEnter(const ComputeGraphPtr &graph, const std::s
if (label_goto == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelGoto: Add to graph %s failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][Node] to graph %s failed.", graph->GetName().c_str());
return nullptr;
}

@@ -265,7 +265,7 @@ NodePtr LabelMaker::AddLabelSwitchEnter(const ComputeGraphPtr &graph, const std:
auto it = node_list.begin();
if (it == node_list.end()) {
REPORT_INNER_ERROR("E19999", "Check param graph:%s has no node", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Graph %s node is empty.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] LabelSwitchByIndex: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}

@@ -277,14 +277,14 @@ NodePtr LabelMaker::AddLabelSwitchEnter(const ComputeGraphPtr &graph, const std:
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input desc failed.");
GELOGE(INTERNAL_ERROR, "[Add][InputDesc] failed.");
return nullptr;
}

if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_LABEL_SWITCH_LIST, labels)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
GELOGE(INTERNAL_ERROR, "[Set][Attr] %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
return nullptr;
}

@@ -292,7 +292,7 @@ NodePtr LabelMaker::AddLabelSwitchEnter(const ComputeGraphPtr &graph, const std:
if (label_switch == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s ahead fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add to graph %s failed.", graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][Node] to graph %s failed.", graph->GetName().c_str());
return nullptr;
}

@@ -320,14 +320,15 @@ NodePtr LabelMaker::AddLabelSwitchLeave(const ComputeGraphPtr &graph, const std:
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input desc failed.");
GELOGE(INTERNAL_ERROR, "[Add][InputDesc] into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
return nullptr;
}

if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_LABEL_SWITCH_LIST, labels)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
GELOGE(INTERNAL_ERROR, "[Set][Attr] %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
return nullptr;
}

@@ -360,20 +361,23 @@ NodePtr LabelMaker::AddLabelSwitchIndex(const ComputeGraphPtr &graph, const std:
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add data input desc failed.");
GELOGE(INTERNAL_ERROR, "[Add][InputDesc] into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
return nullptr;
}
if (op_desc->AddOutputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add output desc into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add data output desc failed.");
GELOGE(INTERNAL_ERROR, "[Add][OutputDesc] into node:%s(%s) in graph:%s fail",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
return nullptr;
}

if (!AttrUtils::SetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_PARENT_NODE_INDEX.c_str());
GELOGE(INTERNAL_ERROR, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return nullptr;
}
NodePtr op_data = graph->AddNodeFront(op_desc);
@@ -384,7 +388,7 @@ NodePtr LabelMaker::AddLabelSwitchIndex(const ComputeGraphPtr &graph, const std:
if (GraphUtils::AddEdge(op_data->GetOutDataAnchor(0), sw_node->GetInDataAnchor(0)) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail", op_data->GetName().c_str(),
sw_node->GetName().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input edge to %s failed.", op_data->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] to %s failed.", op_data->GetName().c_str());
return nullptr;
}
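Taken together, the label_maker.cc hunks above make one mechanical change: every GELOGE message gains an "[Action][Object]" prefix ("[Add][CtrlEdge]", "[Check][Param]", "[Set][Attr]", ...) while the detail arguments stay the same, so failures can be grouped by category when scanning logs. A minimal, self-contained sketch of the convention follows; the GELOGE macro below is a stand-in for illustration only, not the real macro from the GE logging headers.

#include <cstdio>

// Stand-in for the real GELOGE macro; it only reproduces the message shape,
// not GE's logging backend or error-code handling.
#define GELOGE(status, fmt, ...) \
  std::fprintf(stderr, "E %d " fmt "\n", static_cast<int>(status), ##__VA_ARGS__)

constexpr int INTERNAL_ERROR = 1;

int main() {
  const char *src = "label_set";
  const char *dst = "stream_active";
  // Old style: free-form sentence.
  GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", src, dst);
  // New style: "[Action][Object]" prefix, same details afterwards.
  GELOGE(INTERNAL_ERROR, "[Add][CtrlEdge] from %s to %s failed.", src, dst);
  return 0;
}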



+ 3
- 3
ge/graph/label/partitioned_call_label_maker.cc View File

@@ -41,7 +41,7 @@ Status PartitionedCallLabelMaker::Run(uint32_t &label_index) {
if (sub_graph_name.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph_index:%d name is empty, check invalid",
call_desc->GetName().c_str(), call_desc->GetType().c_str(), kSubGraphIndex);
GELOGE(INTERNAL_ERROR, "Node: %s has no subgraph name.", sub_graph_name.c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] Node:%s has no subgraph name.", sub_graph_name.c_str());
return FAILED;
}

@@ -50,7 +50,7 @@ Status PartitionedCallLabelMaker::Run(uint32_t &label_index) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph_name:%s is not exist in parent_graph, check invalid",
call_desc->GetName().c_str(), call_desc->GetType().c_str(),
sub_graph_name.c_str());
GELOGE(INTERNAL_ERROR, "Node: %s has no subgraph.", sub_graph_name.c_str());
GELOGE(INTERNAL_ERROR, "[Get][SubGraph] Node:%s has no subgraph.", sub_graph_name.c_str());
return FAILED;
}

@@ -59,7 +59,7 @@ Status PartitionedCallLabelMaker::Run(uint32_t &label_index) {
if (stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active node failed.", sub_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph:%s failed.", sub_graph->GetName().c_str());
return FAILED;
}



+ 10
- 10
ge/graph/label/while_label_maker.cc View File

@@ -47,7 +47,7 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) cond subgraph index:%d or body subgraph index:%d name is empty, "
"check invalid", while_desc->GetName().c_str(), while_desc->GetType().c_str(),
kCondBranchIndex, kBodyBranchIndex);
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, cond branch: %s, body branch: %s.",
GELOGE(INTERNAL_ERROR, "[Check][Param] Node: %s has invalid subgraph, cond branch: %s, body branch: %s.",
while_desc->GetName().c_str(), cond_name.c_str(), body_name.c_str());
return FAILED;
}
@@ -72,14 +72,14 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
if (cond_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph:%s failed.", cond_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(cond_graph, cond_enter_name, cond_enter_index, cond_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail",
cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetEnter] in Subgraph:%s failed.", cond_graph->GetName().c_str());
return FAILED;
}

@@ -87,28 +87,28 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
if (body_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail",
body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][StreamActive] in Subgraph:%s failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(body_graph, body_enter_name, body_enter_index, body_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail",
body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetEnter] in Subgraph:%s failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelGotoLeave(body_graph, goto_leave_name, cond_enter_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail",
body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelGotoLeave] in Subgraph:%s failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetLeave(body_graph, body_leave_name, body_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail",
body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", body_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSetLeave] in Subgraph:%s failed.", body_graph->GetName().c_str());
return FAILED;
}

@@ -126,14 +126,14 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchLeave node in graph:%s fail",
cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", cond_graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Add][LabelSwitchLeave] in Subgraph:%s failed.", cond_graph->GetName().c_str());
return FAILED;
}

// link Data input.
const auto &all_in_data = cond_out_node->GetAllInDataAnchors();
if (all_in_data.size() != kCondOutputNum) {
GELOGE(FAILED, "Node: %s Cond sbugraph output size:%zu should equal size:%u.",
GELOGE(FAILED, "[Check][Param] Node: %s Cond sbugraph output size:%zu should equal size:%u.",
switch_node->GetName().c_str(), all_in_data.size(), kCondOutputNum);
return FAILED;
}
@@ -144,7 +144,7 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail",
in_anchor->GetPeerOutAnchor()->GetOwnerNode()->GetName().c_str(),
switch_node->GetName().c_str(), cond_graph->GetName().c_str());
GELOGE(FAILED, "Node: %s Add pred data input failed.", switch_node->GetName().c_str());
GELOGE(FAILED, "[Add][PredDataInput] to Node:%s failed.", switch_node->GetName().c_str());
return FAILED;
}



+ 10
- 0
ge/graph/load/graph_loader.cc View File

@@ -75,6 +75,16 @@ Status GraphLoader::LoadModelOnline(uint32_t &model_id, const std::shared_ptr<ge
return ret;
}

if (ge_root_model_ptr->IsSpecificStream()) {
GELOGI("No need to start a new thread to run model in specific scene.");
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, ret:0x%X",
GetContext().DeviceId(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
}
return SUCCESS;
}
ret = model_manager->Start(model_id);
if (ret != SUCCESS) {
if (model_manager->Unload(model_id) != SUCCESS) {


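The new block in GraphLoader::LoadModelOnline short-circuits when the root model was built for a caller-supplied stream: the background run thread is not started, the device is reset, and the load still returns SUCCESS even if the reset fails (the failure is only reported). A stripped-down sketch of that control flow follows; rtDeviceReset, GetContext and the status values are stand-ins here, not the real runtime API.

#include <cstdint>
#include <cstdio>

using Status = uint32_t;
constexpr Status SUCCESS = 0;

// Stand-ins for the runtime call and device context used by the real code.
using rtError_t = int32_t;
constexpr rtError_t RT_ERROR_NONE = 0;
static rtError_t rtDeviceResetStub(uint32_t device_id) { (void)device_id; return RT_ERROR_NONE; }

struct Context { uint32_t DeviceId() const { return 0; } };
static Context &GetContextStub() { static Context ctx; return ctx; }

// Shape of the early-return path taken for models bound to a specific stream.
Status LoadForSpecificStream() {
  std::printf("No need to start a new thread to run model in specific scene.\n");
  rtError_t rt_ret = rtDeviceResetStub(GetContextStub().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    // The real code also raises REPORT_CALL_ERROR("E19999", ...) here.
    std::fprintf(stderr, "Call rt api failed, ret: 0x%X\n", static_cast<unsigned int>(rt_ret));
  }
  return SUCCESS;  // a failed reset is logged but does not fail the load
}

int main() { return LoadForSpecificStream() == SUCCESS ? 0 : 1; }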
+ 69
- 83
ge/graph/load/model_manager/cpu_queue_schedule.cc View File

@@ -51,18 +51,16 @@ CpuTaskInfo::~CpuTaskInfo() {
///
Status CpuTaskModelDequeue::Init(uint32_t queue_id, uintptr_t &in_mbuf) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

args_size_ = sizeof(MbufQueueInfo) + sizeof(uintptr_t); // sizeof(uintptr_t) for save in_mbuf.
rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}
in_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(MbufQueueInfo);
@@ -73,9 +71,8 @@ Status CpuTaskModelDequeue::Init(uint32_t queue_id, uintptr_t &in_mbuf) {
queue_info.in_mbuf = in_mbuf; // Placeholder, input mbuf addr will save to this place.
status = rtMemcpy(args_, args_size_, &queue_info, sizeof(MbufQueueInfo), RT_MEMCPY_HOST_TO_DEVICE);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -86,15 +83,14 @@ Status CpuTaskModelDequeue::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_:%u is 0 or stream_ is nullptr,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelDequeue, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelDequeue failed, status: 0x%X", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -111,9 +107,8 @@ Status CpuTaskModelDequeue::Distribute() {
///
Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list, const map<uint32_t, ZeroCopyOffset> &outside_addrs) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

@@ -127,7 +122,7 @@ Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list, const map<uint32
vector<uint64_t> dst_addrs;
for (const auto &addrs : outside_addrs) {
const auto &addrs_mapping_list = addrs.second.GetOutsideAddrs();
GE_CHK_BOOL_EXEC(!addrs_mapping_list.empty(), return PARAM_INVALID, "not set outside_addrs");
GE_CHK_BOOL_EXEC(!addrs_mapping_list.empty(), return PARAM_INVALID, "[Check][Param] not set outside_addrs");
std::map<const void *, std::vector<void *>> virtual_args_addrs = addrs_mapping_list[0];
for (const auto &virtual_args_addr : virtual_args_addrs) {
addr_map_info.addr_num += virtual_args_addr.second.size();
@@ -143,13 +138,21 @@ Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list, const map<uint32
GE_CHK_RT_RET(rtMalloc(&src_addr_, src_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
rtError_t status = rtMemcpy(src_addr_, src_addrs.size() * sizeof(uint64_t), src_addrs.data(),
src_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", status);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X",
src_addrs.size() * sizeof(uint64_t), status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%lu, ret:0x%X",
src_addrs.size() * sizeof(uint64_t), status);
return RT_ERROR_TO_GE_STATUS(status);)

GE_CHK_RT_RET(rtMalloc(&dst_addr_, dst_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
status = rtMemcpy(dst_addr_, dst_addrs.size() * sizeof(uint64_t), dst_addrs.data(),
dst_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", status);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X",
dst_addrs.size() * sizeof(uint64_t), status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%lu, ret:0x%X",
dst_addrs.size() * sizeof(uint64_t), status);
return RT_ERROR_TO_GE_STATUS(status);)

// src_addr_list is init to src_addr, which is the point to src_addrs
@@ -160,7 +163,9 @@ Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list, const map<uint32
}

status = rtMemcpy(args_, args_size_, &addr_map_info, sizeof(AddrMapInfo), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", status);
GE_IF_BOOL_EXEC(status != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);)
return SUCCESS;
}
@@ -169,15 +174,14 @@ Status CpuTaskZeroCopy::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_:%u is 0 or stream_ is nullptr,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskZeroCopy, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ZeroCopy failed, status: 0x%X", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -215,18 +219,16 @@ CpuTaskZeroCopy::~CpuTaskZeroCopy() {
///
Status CpuTaskPrepareOutput::Init(uintptr_t addr, uint32_t size, uintptr_t in_mbuf, uintptr_t &out_mbuf) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

args_size_ = sizeof(PrepareOutputInfo) + sizeof(uintptr_t); // sizeof(uintptr_t) for save out_mbuf.
rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}
out_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(PrepareOutputInfo);
@@ -240,9 +242,8 @@ Status CpuTaskPrepareOutput::Init(uintptr_t addr, uint32_t size, uintptr_t in_mb
prepare.out_mbuf = out_mbuf; // Placeholder, output mbuf addr will save to this place.
status = rtMemcpy(args_, args_size_, &prepare, sizeof(PrepareOutputInfo), RT_MEMCPY_HOST_TO_DEVICE);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -253,15 +254,14 @@ Status CpuTaskPrepareOutput::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_:%u is 0 or stream_ is nullptr,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskPrepareOutput, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch PrepareOutput failed, status: 0x%X", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -278,9 +278,8 @@ Status CpuTaskPrepareOutput::Distribute() {
///
Status CpuTaskModelEnqueue::Init(uint32_t queue_id, uintptr_t out_mbuf) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

@@ -288,9 +287,8 @@ Status CpuTaskModelEnqueue::Init(uint32_t queue_id, uintptr_t out_mbuf) {
args_size_ = sizeof(MbufQueueInfo);
rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)
@@ -300,9 +298,8 @@ Status CpuTaskModelEnqueue::Init(uint32_t queue_id, uintptr_t out_mbuf) {
queue_info.in_mbuf = out_mbuf;
status = rtMemcpy(args_, args_size_, &queue_info, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -313,15 +310,14 @@ Status CpuTaskModelEnqueue::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_ is 0 or stream_ is nullptr, arg_size:%u,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelEnqueue, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelEnqueue failed, status: 0x%X", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -338,7 +334,7 @@ Status CpuTaskModelEnqueue::Distribute() {
Status CpuTaskActiveEntry::Init(rtStream_t stream) {
if (stream == nullptr) {
REPORT_INNER_ERROR("E19999", "Param stream is nullptr, check invalid");
GELOGE(FAILED, "Task active stream not valid");
GELOGE(FAILED, "[Check][Param] Task active stream not valid");
return FAILED;
}

@@ -348,17 +344,15 @@ Status CpuTaskActiveEntry::Init(rtStream_t stream) {

Status CpuTaskActiveEntry::Distribute() {
if ((active_stream_ == nullptr) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param stream is nullptr or active_stream_ is nullptr, "
"check invalid");
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param stream is nullptr or active_stream_ is nullptr, check invalid");
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t ret = rtStreamActive(active_stream_, stream_);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamActive failed, ret:0x%X",
ret);
GELOGE(RT_FAILED, "Call rt StreamActive failed, ret: 0x%X", ret);
REPORT_CALL_ERROR("E19999", "Call rtStreamActive failed, ret:0x%X", ret);
GELOGE(RT_FAILED, "[Call][RtStreamActive] failed, ret:0x%X", ret);
return RT_ERROR_TO_GE_STATUS(ret);
}

@@ -374,27 +368,24 @@ Status CpuTaskActiveEntry::Distribute() {
///
Status CpuTaskWaitEndGraph::Init(uint32_t model_id) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

args_size_ = sizeof(model_id);
rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -405,15 +396,14 @@ Status CpuTaskWaitEndGraph::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_:%u is 0 or stream_ is nullptr,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskWaitEndGraph, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch WaitEndGraph failed, status: 0x%X", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -429,27 +419,24 @@ Status CpuTaskWaitEndGraph::Distribute() {
///
Status CpuTaskModelRepeat::Init(uint32_t model_id) {
if ((args_ != nullptr) || (args_size_ > 0)) {
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0,"
"check invalid", args_size_);
GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
REPORT_INNER_ERROR("E19999", "Param args_ is not nullptr or args_size_:%u > 0, check invalid", args_size_);
GELOGE(FAILED, "[Check][Param] Task already initialized, size:%u", args_size_);
return FAILED;
}

args_size_ = sizeof(model_id);
rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, status);
GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, status);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, status);
return RT_ERROR_TO_GE_STATUS(status);
}

@@ -460,15 +447,14 @@ Status CpuTaskModelRepeat::Distribute() {
if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
REPORT_INNER_ERROR("E19999", "Param args_ is nullptr or args_size_:%u is 0 or stream_ is nullptr,"
"check invalid", args_size_);
GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
GELOGE(FAILED, "[Check][Param] Task not initialized, distribute failed, size:%u", args_size_);
return FAILED;
}

rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelRepeat, kCoreDim, args_, args_size_, nullptr, stream_);
if (status != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X",
status);
GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelRepeat failed, status: 0x%x", status);
REPORT_CALL_ERROR("E19999", "Call rtCpuKernelLaunch failed, ret:0x%X", status);
GELOGE(RT_FAILED, "[Call][RtCpuKernelLaunch] failed, ret:0x%X", status);
return RT_ERROR_TO_GE_STATUS(status);
}
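All of the CpuTask*::Init bodies touched above share the same guard-allocate-copy sequence, which these hunks merely reformat: refuse a second Init, rtMalloc the args buffer, rtMemcpy the host struct into it, and translate any runtime error into a GE status. A compact host-only sketch of that sequence is below; the rtMalloc/rtMemcpy stand-ins keep the parameter shape but use ordinary heap memory.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using Status = uint32_t;
constexpr Status SUCCESS = 0;
constexpr Status FAILED = 1;
constexpr Status RT_FAILED = 2;

using rtError_t = int32_t;
constexpr rtError_t RT_ERROR_NONE = 0;

// Host-side stand-ins with the same parameter shape as rtMalloc/rtMemcpy.
static rtError_t RtMallocStub(void **ptr, uint64_t size) {
  *ptr = std::malloc(size);
  return (*ptr != nullptr) ? RT_ERROR_NONE : -1;
}
static rtError_t RtMemcpyStub(void *dst, uint64_t dst_max, const void *src, uint64_t count) {
  if (count > dst_max) { return -1; }
  std::memcpy(dst, src, count);
  return RT_ERROR_NONE;
}

struct MbufQueueInfo { uint32_t queue_id; uintptr_t in_mbuf; };

// Guard-allocate-copy sequence shared by the CpuTask* Init methods.
struct CpuTaskSketch {
  void *args_ = nullptr;
  uint32_t args_size_ = 0;

  Status Init(uint32_t queue_id) {
    if ((args_ != nullptr) || (args_size_ > 0)) {  // 1. reject double initialization
      std::fprintf(stderr, "[Check][Param] Task already initialized, size:%u\n", args_size_);
      return FAILED;
    }
    args_size_ = sizeof(MbufQueueInfo);
    if (RtMallocStub(&args_, args_size_) != RT_ERROR_NONE) {  // 2. allocate the args buffer
      std::fprintf(stderr, "[Call][RtMalloc] failed, size:%u\n", args_size_);
      return RT_FAILED;
    }
    MbufQueueInfo info{queue_id, 0};
    if (RtMemcpyStub(args_, args_size_, &info, sizeof(info)) != RT_ERROR_NONE) {  // 3. copy host struct
      std::fprintf(stderr, "[Call][RtMemcpy] failed, size:%u\n", args_size_);
      return RT_FAILED;
    }
    return SUCCESS;
  }
  ~CpuTaskSketch() { std::free(args_); }
};

int main() {
  CpuTaskSketch task;
  return (task.Init(3) == SUCCESS && task.Init(3) == FAILED) ? 0 : 1;
}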



+ 77
- 68
ge/graph/load/model_manager/data_dumper.cc View File

@@ -127,7 +127,7 @@ void DataDumper::ReleaseDevMem(void **ptr) noexcept {
if (*ptr != nullptr) {
rtError_t rt_ret = rtFree(*ptr);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "Call rtFree failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtFree] failed, ret:0x%X", rt_ret);
}

*ptr = nullptr;
@@ -144,7 +144,7 @@ void DataDumper::SaveDumpInput(const std::shared_ptr<Node> &node) {
if (node != nullptr) {
auto input_op_desc = node->GetOpDesc();
if (input_op_desc == nullptr) {
GELOGE(PARAM_INVALID, "input op desc is null.");
GELOGE(PARAM_INVALID, "[Get][OpDesc] input op desc is null.");
return;
}

@@ -153,7 +153,7 @@ void DataDumper::SaveDumpInput(const std::shared_ptr<Node> &node) {
ge::NodePtr dst_node = dst_in_data_anchor->GetOwnerNode();
auto op_desc = dst_node->GetOpDesc();
if (op_desc == nullptr) {
GELOGE(PARAM_INVALID, "input op desc is null.");
GELOGE(PARAM_INVALID, "[Get][OpDesc] input op desc is null.");
return;
}

@@ -179,7 +179,7 @@ void DataDumper::SaveOpDebugId(uint32_t task_id, uint32_t stream_id, void *op_de
void DataDumper::SaveDumpTask(uint32_t task_id, uint32_t stream_id, const std::shared_ptr<OpDesc> &op_desc,
uintptr_t args) {
if (op_desc == nullptr) {
GELOGE(PARAM_INVALID, "Opdesc is nullptr");
GELOGE(PARAM_INVALID, "[Check][Param] Opdesc is nullptr");
return;
}

@@ -190,14 +190,14 @@ void DataDumper::SaveDumpTask(uint32_t task_id, uint32_t stream_id, const std::s
InnerInputMapping &inner_input_mapping = iter.first->second;
auto &data_op = inner_input_mapping.data_op;
if (data_op == nullptr) {
GELOGE(PARAM_INVALID, "data_op is null.");
GELOGE(PARAM_INVALID, "[Check][Param] data_op is null.");
return;
}

auto input_tensor = op_desc->GetInputDescPtr(inner_input_mapping.input_anchor_index);
if (input_tensor == nullptr) {
GELOGE(PARAM_INVALID, "input_tensor is null, index: %d, size: %zu.", inner_input_mapping.input_anchor_index,
op_desc->GetInputsSize());
GELOGE(PARAM_INVALID, "[Get][InputDescPtr] input_tensor in op:%s is null, index:%d, size:%zu.",
op_desc->GetName().c_str(), inner_input_mapping.input_anchor_index, op_desc->GetInputsSize());
return;
}

@@ -205,7 +205,8 @@ void DataDumper::SaveDumpTask(uint32_t task_id, uint32_t stream_id, const std::s
if (AttrUtils::GetInt(input_tensor, ATTR_NAME_INPUT_ORIGIN_SIZE, data_size)) {
GELOGI("Get aipp data size according to attr is %ld", data_size);
} else if (TensorUtils::GetTensorSizeInBytes(*input_tensor, data_size) != SUCCESS) {
GELOGE(PARAM_INVALID, "Get input size filed");
GELOGE(PARAM_INVALID, "[Get][InputSize] failed in %s, index:%u",
op_desc->GetName().c_str(), inner_input_mapping.input_anchor_index);
return;
}

@@ -249,7 +250,7 @@ Status DataDumper::GenerateOutput(toolkit::aicpu::dump::Output &output,
int64_t output_size = 0;
if (TensorUtils::GetTensorSizeInBytes(tensor_descs.at(index), output_size) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size fail");
GELOGE(PARAM_INVALID, "Get output size filed");
GELOGE(PARAM_INVALID, "[Get][OutputSize] failed");
return PARAM_INVALID;
}
GELOGD("Get output size in dump is %ld", output_size);
@@ -274,34 +275,37 @@ Status DataDumper::DumpRefOutput(const DataDumper::InnerDumpInfo &inner_dump_inf
size_t index;
// parser and find which node's input or output tensor desc is chosen for dump info
if (!ParseNameIndex(node_name_index, dump_op_name, input_or_output, index)) {
GELOGE(PARAM_INVALID, "Op [%s] output desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s].",
GELOGE(PARAM_INVALID, "[Check][Param] Op [%s] output desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s].",
inner_dump_info.op->GetName().c_str(), i, node_name_index.c_str());
return PARAM_INVALID;
}
GE_CHECK_NOTNULL(compute_graph_);
auto replace_node = compute_graph_->FindNode(dump_op_name);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(replace_node == nullptr,
"Op [%s] output desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s],"
" cannot find redirect node[%s].",
"[Check][Param] Op [%s] output desc[%zu] with invalid ATTR_DATA_DUMP_REF "
"attr[%s], cannot find redirect node[%s].",
inner_dump_info.op->GetName().c_str(), i, node_name_index.c_str(),
dump_op_name.c_str());
auto replace_opdesc = replace_node->GetOpDesc();
GE_CHECK_NOTNULL(replace_opdesc);
auto iter = ref_info_.find(replace_opdesc);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(iter == ref_info_.end(),
"Op [%s] output desc[%zu] cannot find any saved redirect node[%s]'s info.",
"[Check][Param] Op [%s] output desc[%zu] cannot find "
"any saved redirect node[%s]'s info.",
inner_dump_info.op->GetName().c_str(), i, replace_opdesc->GetName().c_str());
GE_CHECK_NOTNULL(iter->second);
auto addr = reinterpret_cast<uintptr_t>(iter->second);
if (input_or_output == kDumpInput) {
const auto &replace_input_descs = replace_opdesc->GetAllInputsDesc();
addr += kAddrLen * index;
GE_CHK_STATUS_RET(GenerateOutput(output, replace_input_descs, addr, index), "Generate output failed");
GE_CHK_STATUS_RET(GenerateOutput(output, replace_input_descs, addr, index),
"[Generate][Output] failed for %s, index:%zu", inner_dump_info.op->GetName().c_str(), index);
} else if (input_or_output == kDumpOutput) {
const auto &replace_output_descs = replace_opdesc->GetAllOutputsDesc();
const auto replace_input_size = replace_opdesc->GetAllInputsDesc().size();
addr += (index + replace_input_size) * kAddrLen;
GE_CHK_STATUS_RET(GenerateOutput(output, replace_output_descs, addr, index), "Generate output failed");
GE_CHK_STATUS_RET(GenerateOutput(output, replace_output_descs, addr, index),
"[Generate][Output] failed for %s, index:%zu", inner_dump_info.op->GetName().c_str(), index);
}
GELOGD("Op [%s] output desc[%zu] dump info is replaced by node[%s] [%s] tensor_desc [%zu]",
inner_dump_info.op->GetName().c_str(), i, dump_op_name.c_str(), input_or_output.c_str(), index);
@@ -314,9 +318,9 @@ Status DataDumper::DumpOutputWithTask(const InnerDumpInfo &inner_dump_info, tool
std::vector<int64_t> v_memory_type;
bool has_mem_type_attr = ge::AttrUtils::GetListInt(inner_dump_info.op, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, v_memory_type);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(has_mem_type_attr && (v_memory_type.size() != output_descs.size()),
"DumpOutputWithTask[%s], output size[%zu], output memory type size[%zu]",
inner_dump_info.op->GetName().c_str(), output_descs.size(),
v_memory_type.size());
"[Check][Param] DumpOutputWithTask[%s], output size[%zu], "
"output memory type size[%zu]", inner_dump_info.op->GetName().c_str(),
output_descs.size(), v_memory_type.size());

size_t no_need_dump_output_num = 0;
for (size_t i = 0; i < output_descs.size(); ++i) {
@@ -338,16 +342,16 @@ Status DataDumper::DumpOutputWithTask(const InnerDumpInfo &inner_dump_info, tool
"output which is need to dump.", inner_dump_info.op->GetName().c_str(),
inner_dump_info.op->GetType().c_str(), no_need_dump_output_num, output_descs.size(),
output_addrs.size());
GELOGE(PARAM_INVALID, "The number of output does not match in op:%s(%s). The size[%zu] of output which is no need"
" to dump should not greater than the size[%zu] of output descs minus the size[%zu] of output which is "
"need to dump.", inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str(),
no_need_dump_output_num, output_descs.size(), output_addrs.size());
GELOGE(PARAM_INVALID, "[Check][Param] The number of output does not match in op:%s(%s). The size[%zu] of output "
"which is no need to dump should not greater than the size[%zu] of output descs minus the size[%zu] "
"of output which is need to dump.", inner_dump_info.op->GetName().c_str(),
inner_dump_info.op->GetType().c_str(), no_need_dump_output_num, output_descs.size(), output_addrs.size());
return PARAM_INVALID;
}

// check dump output tensor desc is redirected by attr ATTR_DATA_DUMP_REF
if (AttrUtils::GetStr(&output_desc, ATTR_DATA_DUMP_REF, node_name_index)) {
GE_CHK_STATUS_RET(DumpRefOutput(inner_dump_info, output, i, node_name_index), "DumpRefOutput failed");
GE_CHK_STATUS_RET(DumpRefOutput(inner_dump_info, output, i, node_name_index), "[Dump][RefOutput] failed");
task.mutable_output()->Add(std::move(output));
} else {
if (IsTensorDescWithSkipDumpAddrType(has_mem_type_attr, v_memory_type, i)) {
@@ -356,7 +360,7 @@ Status DataDumper::DumpOutputWithTask(const InnerDumpInfo &inner_dump_info, tool
if (TensorUtils::GetTensorSizeInBytes(output_descs.at(i), output_size) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get output tensor size fail in op:%s(%s), index:%zu",
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str(), i);
GELOGE(PARAM_INVALID, "Get output size failed.");
GELOGE(PARAM_INVALID, "[Get][OutputSize] failed in %s, index:%zu", inner_dump_info.op->GetName().c_str(), i);
return PARAM_INVALID;
}
GELOGI("Get output size of l1_fusion_dump is %ld", output_size);
@@ -364,7 +368,8 @@ Status DataDumper::DumpOutputWithTask(const InnerDumpInfo &inner_dump_info, tool
} else {
const auto input_size = inner_dump_info.op->GetInputsSize();
auto addr = inner_dump_info.args + (i + input_size) * kAddrLen;
GE_CHK_STATUS_RET(GenerateOutput(output, output_descs, addr, i), "Generate output failed");
GE_CHK_STATUS_RET(GenerateOutput(output, output_descs, addr, i),
"[Generate][Output] failed for %s, index:%zu", inner_dump_info.op->GetName().c_str(), i);
task.mutable_output()->Add(std::move(output));
}
}
@@ -383,11 +388,11 @@ Status DataDumper::DumpOutput(const InnerDumpInfo &inner_dump_info, toolkit::aic
auto output_tensor = inner_dump_info.op->GetOutputDescPtr(inner_dump_info.output_anchor_index);
const std::vector<void *> output_addrs = ModelUtils::GetOutputDataAddrs(*runtime_param_, inner_dump_info.op);
if (output_tensor == nullptr) {
REPORT_INNER_ERROR("E19999", "output_desc tensor is nullptr in op:%s(%s), index:%u, "
"check invalid",
REPORT_INNER_ERROR("E19999", "output_desc tensor is nullptr in op:%s(%s), index:%u, check invalid",
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str(),
inner_dump_info.output_anchor_index);
GELOGE(PARAM_INVALID, "output_tensor is null, index: %d, size: %zu.", inner_dump_info.output_anchor_index,
GELOGE(PARAM_INVALID, "[Get][OutputDescPtr] output_tensor is null in op:%s, index:%d, size:%zu.",
inner_dump_info.op->GetName().c_str(), inner_dump_info.output_anchor_index,
inner_dump_info.op->GetOutputsSize());
return PARAM_INVALID;
}
@@ -413,7 +418,9 @@ Status DataDumper::DumpOutput(const InnerDumpInfo &inner_dump_info, toolkit::aic
REPORT_INNER_ERROR("E19999", "output_anchor_index:%u >= output addr size:%zu in op:%s(%s), "
"check invalid", inner_dump_info.output_anchor_index, output_addrs.size(),
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str());
GELOGE(FAILED, "Index is out of range.");
GELOGE(FAILED, "[Check][Param] output_anchor_index:%u >= output addr size:%zu in op:%s(%s)",
inner_dump_info.output_anchor_index, output_addrs.size(),
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str());
return FAILED;
}
auto data_addr = inner_dump_info.args + kAddrLen * static_cast<uint32_t>(inner_dump_info.input_anchor_index);
@@ -440,7 +447,7 @@ Status DataDumper::GenerateInput(toolkit::aicpu::dump::Input &input, const OpDes
GELOGI("Get aipp input size according to attr is %ld", input_size);
} else if (TensorUtils::GetTensorSizeInBytes(tensor_descs.at(index), input_size) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size fail");
GELOGE(PARAM_INVALID, "Get input size filed");
GELOGE(PARAM_INVALID, "[Get][TensorSize] failed");
return PARAM_INVALID;
}
GELOGD("Get input size in dump is %ld", input_size);
@@ -456,34 +463,37 @@ Status DataDumper::DumpRefInput(const DataDumper::InnerDumpInfo &inner_dump_info
size_t index;
// parser and find which node's input or output tensor desc is chosen for dump info
if (!ParseNameIndex(node_name_index, dump_op_name, input_or_output, index)) {
GELOGE(PARAM_INVALID, "Op [%s] input desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s].",
GELOGE(PARAM_INVALID, "[Call][ParseNameIndex] Op [%s] input desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s].",
inner_dump_info.op->GetName().c_str(), i, node_name_index.c_str());
return PARAM_INVALID;
}
GE_CHECK_NOTNULL(compute_graph_);
auto replace_node = compute_graph_->FindNode(dump_op_name);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(replace_node == nullptr,
"Op [%s] input desc[%zu] with invalid ATTR_DATA_DUMP_REF attr[%s],"
" cannot find redirect node[%s].",
"[Check][Param] Op [%s] input desc[%zu] with invalid ATTR_DATA_DUMP_REF "
"attr[%s], cannot find redirect node[%s].",
inner_dump_info.op->GetName().c_str(), i, node_name_index.c_str(),
dump_op_name.c_str());
auto replace_opdesc = replace_node->GetOpDesc();
GE_CHECK_NOTNULL(replace_opdesc);
auto iter = ref_info_.find(replace_opdesc);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(iter == ref_info_.end(),
"Op [%s] input desc[%zu] cannot find any saved redirect node[%s]'s info.",
"[Check][Param] Op [%s] input desc[%zu] cannot find "
"any saved redirect node[%s]'s info.",
inner_dump_info.op->GetName().c_str(), i, replace_opdesc->GetName().c_str());
GE_CHECK_NOTNULL(iter->second);
auto addr = reinterpret_cast<uintptr_t>(iter->second);
if (input_or_output == kDumpInput) {
const auto &replace_input_descs = replace_opdesc->GetAllInputsDesc();
addr += kAddrLen * index;
GE_CHK_STATUS_RET(GenerateInput(input, replace_input_descs, addr, index), "Generate input failed");
GE_CHK_STATUS_RET(GenerateInput(input, replace_input_descs, addr, index),
"[Generate][Input] failed for %s, index:%zu", inner_dump_info.op->GetName().c_str(), index);
} else if (input_or_output == kDumpOutput) {
const auto &replace_output_descs = replace_opdesc->GetAllOutputsDesc();
const auto replace_input_size = replace_opdesc->GetAllInputsDesc().size();
addr += (index + replace_input_size) * kAddrLen;
GE_CHK_STATUS_RET(GenerateInput(input, replace_output_descs, addr, index), "Generate input failed");
GE_CHK_STATUS_RET(GenerateInput(input, replace_output_descs, addr, index),
"[Generate][Input] failed for %s, index:%zu", inner_dump_info.op->GetName().c_str(), index);
}
GELOGD("Op [%s] input desc[%zu] dump info is replaced by node[%s] [%s] tensor_desc [%zu]",
inner_dump_info.op->GetName().c_str(), i, dump_op_name.c_str(), input_or_output.c_str(), index);
@@ -498,14 +508,14 @@ Status DataDumper::DumpInput(const InnerDumpInfo &inner_dump_info, toolkit::aicp
REPORT_INNER_ERROR("E19999", "input_desc size:%zu != input addr size:%zu in op:%s(%s)",
input_descs.size(), input_addrs.size(),
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str());
GELOGE(PARAM_INVALID, "Invalid input desc addrs size %zu, op %s has %zu input desc.", input_addrs.size(),
inner_dump_info.op->GetName().c_str(), input_descs.size());
GELOGE(PARAM_INVALID, "[Check][Param] Invalid input desc addrs size %zu, op %s has %zu input desc.",
input_addrs.size(), inner_dump_info.op->GetName().c_str(), input_descs.size());
return PARAM_INVALID;
}
std::vector<int64_t> v_memory_type;
bool has_mem_type_attr = ge::AttrUtils::GetListInt(inner_dump_info.op, ATTR_NAME_INPUT_MEM_TYPE_LIST, v_memory_type);
GE_RT_PARAM_INVALID_WITH_LOG_IF_TRUE(has_mem_type_attr && (v_memory_type.size() != input_descs.size()),
"DumpInput[%s], input size[%zu], input memory type size[%zu]",
"[Check][Param] DumpInput[%s], input size[%zu], input memory type size[%zu]",
inner_dump_info.op->GetName().c_str(), input_descs.size(), v_memory_type.size());

for (size_t i = 0; i < input_descs.size(); ++i) {
@@ -513,7 +523,8 @@ Status DataDumper::DumpInput(const InnerDumpInfo &inner_dump_info, toolkit::aicp
std::string node_name_index;
// check dump input tensor desc is redirected by attr ATTR_DATA_DUMP_REF
if (AttrUtils::GetStr(&input_descs.at(i), ATTR_DATA_DUMP_REF, node_name_index)) {
GE_CHK_STATUS_RET(DumpRefInput(inner_dump_info, input, i, node_name_index), "DumpRefInput failed");
GE_CHK_STATUS_RET(DumpRefInput(inner_dump_info, input, i, node_name_index),
"[Dump][RefInput] failed, node name index:%s", node_name_index.c_str());
task.mutable_input()->Add(std::move(input));
// normal dump without attr
} else {
@@ -525,14 +536,16 @@ Status DataDumper::DumpInput(const InnerDumpInfo &inner_dump_info, toolkit::aicp
} else if (TensorUtils::GetTensorSizeInBytes(input_descs.at(i), input_size) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get input tensor size fail in op:%s(%s), index:%zu",
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str(), i);
GELOGE(PARAM_INVALID, "Get input size failed.");
GELOGE(PARAM_INVALID, "[Get][InputTensorSize] fail in op:%s(%s), index:%zu",
inner_dump_info.op->GetName().c_str(), inner_dump_info.op->GetType().c_str(), i);
return PARAM_INVALID;
}
GELOGI("Get input size of l1_fusion_dump is %ld", input_size);
GenerateOpBuffer(input_size, task);
} else {
auto addr = inner_dump_info.args + kAddrLen * i;
GE_CHK_STATUS_RET(GenerateInput(input, input_descs, addr, i), "Generate input failed");
GE_CHK_STATUS_RET(GenerateInput(input, input_descs, addr, i),
"[Generate][Input] failed for op:%s, index:%zu", inner_dump_info.op->GetName().c_str(), i);
task.mutable_input()->Add(std::move(input));
}
}
@@ -554,7 +567,7 @@ Status DataDumper::ExecuteLoadDumpInfo(toolkit::aicpu::dump::OpMappingInfo &op_m
bool ret = op_mapping_info.SerializeToString(&proto_str);
if (!ret || proto_size == 0) {
REPORT_INNER_ERROR("E19999", "Serialize proto to string fail");
GELOGE(PARAM_INVALID, "Protobuf SerializeToString failed, proto size %zu.", proto_size);
GELOGE(PARAM_INVALID, "[Call][SerializeToString] failed, proto size %zu.", proto_size);
return PARAM_INVALID;
}

@@ -565,25 +578,23 @@ Status DataDumper::ExecuteLoadDumpInfo(toolkit::aicpu::dump::OpMappingInfo &op_m

rtError_t rt_ret = rtMalloc(&dev_mem_load_, proto_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
proto_size, rt_ret);
GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "load dump information.", proto_size)

rt_ret = rtMemcpy(dev_mem_load_, proto_size, proto_str.c_str(), proto_size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
proto_size, rt_ret);
GELOGE(RT_FAILED, "Call rtMemcpy failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtDatadumpInfoLoad(dev_mem_load_, proto_size);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDatadumpInfoLoad failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rtDatadumpInfoLoad failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtDatadumpInfoLoad failed, length:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtDatadumpInfoLoad] failed, length:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -598,7 +609,7 @@ Status DataDumper::ExecuteUnLoadDumpInfo(toolkit::aicpu::dump::OpMappingInfo &op
bool ret = op_mapping_info.SerializeToString(&proto_str);
if (!ret || proto_size == 0) {
REPORT_INNER_ERROR("E19999", "Serialize proto to string fail");
GELOGE(PARAM_INVALID, "Protobuf SerializeToString failed, proto size %zu.", proto_size);
GELOGE(PARAM_INVALID, "[Call][SerializeToString] failed, proto size %zu.", proto_size);
return PARAM_INVALID;
}

@@ -609,25 +620,23 @@ Status DataDumper::ExecuteUnLoadDumpInfo(toolkit::aicpu::dump::OpMappingInfo &op

rtError_t rt_ret = rtMalloc(&dev_mem_unload_, proto_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
proto_size, rt_ret);
GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "unload dump information.", proto_size)

rt_ret = rtMemcpy(dev_mem_unload_, proto_size, proto_str.c_str(), proto_size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
proto_size, rt_ret);
GELOGE(RT_FAILED, "Call rtMemcpy failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtDatadumpInfoLoad(dev_mem_unload_, proto_size);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDatadumpInfoLoad failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rtDatadumpInfoLoad failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtDatadumpInfoLoad failed, length:%zu, ret:0x%X", proto_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtDatadumpInfoLoad] failed, length:%zu, ret:0x%X", proto_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
load_flag_ = false;
@@ -654,7 +663,7 @@ Status DataDumper::LoadDumpInfo() {
SetOpMappingLoopAddr(global_step_, loop_per_iter_, loop_cond_, op_mapping_info);
auto ret = BuildTaskInfo(op_mapping_info);
if (ret != SUCCESS) {
GELOGE(ret, "Build task info failed");
GELOGE(ret, "[Build][TaskInfo] failed, ret:%u, path:%s", ret, dump_path.c_str());
return ret;
}

@@ -663,9 +672,9 @@ Status DataDumper::LoadDumpInfo() {
SetOpDebugIdToAicpu(op_debug_task_id_, op_debug_stream_id_, op_debug_addr_, op_mapping_info);

if (!op_list_.empty() || is_op_debug_ || is_end_graph_) {
auto ret = ExecuteLoadDumpInfo(op_mapping_info);
ret = ExecuteLoadDumpInfo(op_mapping_info);
if (ret != SUCCESS) {
GELOGE(ret, "Execute load dump info failed");
GELOGE(ret, "[Execute][LoadDumpInfo] failed, ret:%u", ret);
return ret;
}
}
@@ -686,7 +695,7 @@ Status DataDumper::BuildTaskInfo(toolkit::aicpu::dump::OpMappingInfo &op_mapping
if (dump_properties_.GetDumpMode() == kDumpOutput) {
Status ret = DumpOutput(op_iter, task);
if (ret != SUCCESS) {
GELOGE(ret, "Dump output failed");
GELOGE(ret, "[Dump][Output] failed, ret:%u, op:%s", ret, op_desc->GetName().c_str());
return ret;
}
op_mapping_info.mutable_task()->Add(std::move(task));
@@ -696,7 +705,7 @@ Status DataDumper::BuildTaskInfo(toolkit::aicpu::dump::OpMappingInfo &op_mapping
if (op_iter.is_task) {
Status ret = DumpInput(op_iter, task);
if (ret != SUCCESS) {
GELOGE(ret, "Dump input failed");
GELOGE(ret, "[Dump][Input] failed, ret:%u, op:%s", ret, op_desc->GetName().c_str());
return ret;
}
}
@@ -706,13 +715,13 @@ Status DataDumper::BuildTaskInfo(toolkit::aicpu::dump::OpMappingInfo &op_mapping
if (dump_properties_.GetDumpMode() == kDumpAll || is_op_debug_) {
auto ret = DumpOutput(op_iter, task);
if (ret != SUCCESS) {
GELOGE(ret, "Dump output failed when in dumping all");
GELOGE(ret, "[Dump][Output] failed when in dumping all, ret:%u, op:%s", ret, op_desc->GetName().c_str());
return ret;
}
if (op_iter.is_task) {
ret = DumpInput(op_iter, task);
if (ret != SUCCESS) {
GELOGE(ret, "Dump input failed when in dumping all");
GELOGE(ret, "[Dump][Input] failed when in dumping all, ret:%u, op:%s", ret, op_desc->GetName().c_str());
return ret;
}
}
@@ -795,7 +804,7 @@ Status DataDumper::UnloadDumpInfo() {
}
auto ret = ExecuteUnLoadDumpInfo(op_mapping_info);
if (ret != SUCCESS) {
GELOGE(ret, "Execute unload dump info failed");
GELOGE(ret, "[Execute][UnLoadDumpInfo] failed, ret:%d", ret);
return ret;
}
return SUCCESS;
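ExecuteLoadDumpInfo and ExecuteUnLoadDumpInfo keep the same four-step pipeline these hunks re-annotate: serialize the OpMappingInfo proto to a string, rtMalloc a device buffer of that size, rtMemcpy the bytes across, then hand the buffer to rtDatadumpInfoLoad; the edit only adds the buffer size to each failure message. A host-only sketch of the pipeline follows, with the serialization and runtime calls mocked rather than the real protobuf/runtime interfaces.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

using Status = unsigned int;
constexpr Status SUCCESS = 0;
constexpr Status PARAM_INVALID = 1;
constexpr Status RT_FAILED = 2;

// Stand-ins for the runtime API used by the real ExecuteLoadDumpInfo.
static int RtMallocStub(void **ptr, size_t size) { *ptr = std::malloc(size); return *ptr ? 0 : -1; }
static int RtMemcpyStub(void *dst, size_t dst_max, const void *src, size_t count) {
  if (count > dst_max) { return -1; }
  std::memcpy(dst, src, count);
  return 0;
}
static int RtDatadumpInfoLoadStub(void *buf, size_t len) { (void)buf; return len > 0 ? 0 : -1; }

Status ExecuteLoadDumpInfoSketch(const std::string &proto_str, void *&dev_mem) {
  size_t proto_size = proto_str.size();
  if (proto_size == 0) {                                      // 1. serialize (mocked as a ready string)
    std::fprintf(stderr, "[Call][SerializeToString] failed, proto size %zu.\n", proto_size);
    return PARAM_INVALID;
  }
  if (RtMallocStub(&dev_mem, proto_size) != 0) {              // 2. allocate the device buffer
    std::fprintf(stderr, "[Call][RtMalloc] failed, size:%zu\n", proto_size);
    return RT_FAILED;
  }
  if (RtMemcpyStub(dev_mem, proto_size, proto_str.data(), proto_size) != 0) {  // 3. copy the bytes
    std::fprintf(stderr, "[Call][RtMemcpy] failed, size:%zu\n", proto_size);
    return RT_FAILED;
  }
  if (RtDatadumpInfoLoadStub(dev_mem, proto_size) != 0) {     // 4. hand it to the dump loader
    std::fprintf(stderr, "[Call][RtDatadumpInfoLoad] failed, length:%zu\n", proto_size);
    return RT_FAILED;
  }
  return SUCCESS;
}

int main() {
  void *dev = nullptr;
  Status ret = ExecuteLoadDumpInfoSketch("serialized-op-mapping-info", dev);
  std::free(dev);
  return ret == SUCCESS ? 0 : 1;
}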


+ 1
- 1
ge/graph/load/model_manager/data_inputer.cc View File

@@ -24,7 +24,7 @@

namespace ge {
domi::Status InputDataWrapper::Init(const InputData &input, const OutputData &output) {
GE_CHK_BOOL_RET_STATUS(!is_init, domi::INTERNAL_ERROR, "InputDataWrapper is re-initialized");
GE_CHK_BOOL_RET_STATUS(!is_init, domi::INTERNAL_ERROR, "[Check][Param] InputDataWrapper is re-initialized");

input_ = input;
output_ = output;
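The one-line data_inputer.cc change reads as a guard: GE_CHK_BOOL_RET_STATUS logs the message and returns the given status when its condition is false, here rejecting re-initialization of the wrapper. Below is a stand-in showing roughly how such a guard macro reads; the real macro lives in GE's debug headers and routes through the logging framework.

#include <cstdio>

using Status = int;
constexpr Status SUCCESS = 0;
constexpr Status INTERNAL_ERROR = 1;

// Stand-in for GE_CHK_BOOL_RET_STATUS: if the condition is false, log the
// message and return the given status from the enclosing function.
#define CHK_BOOL_RET_STATUS(expr, status, fmt, ...)   \
  do {                                                \
    if (!(expr)) {                                    \
      std::fprintf(stderr, fmt "\n", ##__VA_ARGS__);  \
      return (status);                                \
    }                                                 \
  } while (false)

struct InputDataWrapperSketch {
  bool is_init = false;
  Status Init() {
    CHK_BOOL_RET_STATUS(!is_init, INTERNAL_ERROR, "[Check][Param] InputDataWrapper is re-initialized");
    is_init = true;
    return SUCCESS;
  }
};

int main() {
  InputDataWrapperSketch wrapper;
  return (wrapper.Init() == SUCCESS && wrapper.Init() == INTERNAL_ERROR) ? 0 : 1;
}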


+ 524
- 396
ge/graph/load/model_manager/davinci_model.cc
File diff suppressed because it is too large
View File


+ 11
- 8
ge/graph/load/model_manager/davinci_model.h View File

@@ -248,8 +248,6 @@ class DavinciModel {
// get total mem size
size_t TotalMemSize() const { return runtime_param_.mem_size; }

const map<uint32_t, MemInfo> &P2PMemInfos() const { return runtime_param_.memory_infos; }

// model name
string Name() const { return name_; }

@@ -361,6 +359,8 @@ class DavinciModel {

void GetCurShape(vector<int64_t> &batch_info, int32_t &dynamic_type) const;

Status GetOpAttr(const std::string &op_name, const std::string &attr_name, std::string &attr_value) const;

void GetModelAttr(vector<string> &dynamic_output_shape_info) const;

///
@@ -474,6 +474,8 @@ class DavinciModel {

int64_t GetLoadEndTime() { return load_end_time_; }

void SaveSpecifyAttrValues(const OpDescPtr &op_desc);

Status ReportProfilingData();

void SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op, uint32_t task_id, uint32_t stream_id) {
@@ -582,10 +584,8 @@ class DavinciModel {
// memory address of model
uintptr_t fixed_mem_base_; // Initial of mem_base_, keep forever.
uint8_t *mem_base_;
uint8_t *p2p_mem_base_;
bool is_inner_mem_base_;
bool is_inner_weight_base_;
bool is_inner_p2p_mem_base_;
// input data manager
DataInputer *data_inputer_;
int64_t load_begin_time_;
@@ -635,7 +635,7 @@ class DavinciModel {
Status UpdateIoTaskArgs(const map<uint32_t, ZeroCopyOffset> &data_info, bool is_input,
const vector<DataBuffer> &blobs, bool is_dynamic, const string &batch_label);

Status CopyInputData(const InputData &input_data, bool device_data = false);
Status CopyInputData(const InputData &input_data);

Status CopyOutputData(uint32_t data_id, OutputData &output_data, rtMemcpyKind_t kind);

@@ -664,13 +664,13 @@ class DavinciModel {

uint8_t *MallocWeightsMem(size_t weights_size);

uint8_t *MallocP2PMem(size_t p2p_data_size);
Status MallocExMem();

void FreeFeatureMapMem();

void FreeWeightsMem();

void FreeP2PMem();
void FreeExMem();

void ReleaseTask();

@@ -880,7 +880,7 @@ class DavinciModel {
Status SinkTimeProfile(const InputData &current_data);

Status InitOutputTensorInfo(const OpDescPtr &op_desc);
Status GenOutputTensorInfo(OutputData *output_data, vector<OutputTensorInfo> &outputs);
Status GenOutputTensorInfo(OutputData *output_data, vector<ge::Tensor> &outputs);

Status InitInputDescInfo(const OpDescPtr &op_desc);
Status InitOutputDescInfo(const OpDescPtr &op_desc, const vector<string> &out_node_name);
@@ -1096,6 +1096,9 @@ class DavinciModel {

// known shape node for dump
void *known_shape_global_step_;

// op name to attrs mapping
std::map<std::string, std::map<std::string, std::vector<std::string>>> op_name_to_attrs_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_

+115 -35 ge/graph/load/model_manager/model_manager.cc

@@ -27,6 +27,7 @@
#include "graph/load/model_manager/davinci_model.h"
#include "model/ge_root_model.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "toolchain/adx_datadump_server.h"

namespace ge {
thread_local uint32_t device_count = 0;
@@ -48,6 +49,7 @@ const int kTimeSpecNano = 1000000000;
const int kTimeSpecMiro = 1000000;
const int kOpNameMaxSize = 100;
const uint64_t kInferSessionId = 0;
const int32_t kDumpStatus = 0;
#pragma pack(push, 1)
struct CustAicpuSoBuf {
uint64_t kernelSoBuf;
@@ -321,6 +323,58 @@ bool ModelManager::IsNeedHybridLoad(ge::GeRootModel &ge_root_model) {
(void)AttrUtils::GetBool(root_graph, ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED, is_dsp_partitioned_graph);
return is_shape_unknown || is_dsp_partitioned_graph || GetContext().GetHostExecFlag();
}

bool ModelManager::IsDumpSeverInited(uint64_t session_id) {
auto it = session_id_to_dump_server_init_flag_.find(session_id);
return it != session_id_to_dump_server_init_flag_.end() && it->second;
}

Status ModelManager::AddDumpProperties(uint64_t session_id, const DumpProperties &dump_properties) {
if (!IsDumpSeverInited(session_id)) {
if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) {
GE_IF_BOOL_EXEC(AdxDataDumpServerInit() != kDumpStatus,
GELOGE(PARAM_INVALID, "[Init][AdxDataDumpServer] failed, session_id:%lu.", session_id);
return PARAM_INVALID)
GELOGI("Init adx data dump server success");
session_id_to_dump_server_init_flag_[session_id] = true;
}
}
DumpManager::GetInstance().AddDumpProperties(session_id, dump_properties);
return SUCCESS;
}

Status ModelManager::InitDumPropertiesWithNewSessionId(uint64_t session_id) {
DumpProperties dump_properties;
dump_properties.InitByOptions();
GE_CHK_STATUS_RET(AddDumpProperties(session_id, dump_properties), "[Add][DumpProperties] failed.");
return SUCCESS;
}

Status ModelManager::UpdateSessionId(uint32_t model_id, GeModelPtr ge_model,
std::shared_ptr<DavinciModel> &davinci_model, uint64_t &session_id) {
uint64_t new_session_id;
Status ret = GenSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Generate session_id for infer failed.");
ret = davinci_model->UpdateSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Update session_id for infer failed.");
ge_model->InsertSessionMap(model_id, new_session_id);
GELOGD("Update new session id: %lu.", new_session_id);
session_id = new_session_id;
return SUCCESS;
}

bool ModelManager::HasVarNode(ComputeGraphPtr &compute_graph) const {
for (ge::NodePtr &node : compute_graph->GetAllNodes()) {
if (node == nullptr) {
continue;
}
if (node->GetType() == VARIABLE) {
return true;
}
}
return false;
}
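The helpers above follow an init-once-per-session pattern: the ADX data-dump server is started only the first time a session that actually has dumping (or op debug) enabled is seen, and session_id_to_dump_server_init_flag_ remembers that it happened. A minimal standalone sketch of the same guard, using a hypothetical InitDumpServer() and DumpConfig in place of the real dump facilities:

#include <cstdint>
#include <map>

// Hypothetical stand-ins for the real dump facilities.
struct DumpConfig { bool dump_open = false; bool op_debug_open = false; };
static int InitDumpServer() { return 0; }  // 0 on success, like an *_Init() entry point

class SessionDumpRegistry {
 public:
  // Start the dump server at most once per session, and only if dumping is requested.
  bool AddDumpProperties(uint64_t session_id, const DumpConfig &cfg) {
    auto it = inited_.find(session_id);
    bool already = (it != inited_.end() && it->second);
    if (!already && (cfg.dump_open || cfg.op_debug_open)) {
      if (InitDumpServer() != 0) {
        return false;                 // surface the failure to the caller
      }
      inited_[session_id] = true;     // later loads in this session skip the init
    }
    return true;
  }

 private:
  std::map<uint64_t, bool> inited_;   // session_id -> "server already started"
};

Keeping the flag keyed by session id means repeated model loads within one session pay the server start-up cost at most once.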

///
/// @ingroup domi_ome
/// @brief load model online
@@ -347,10 +401,6 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::Ge
davinci_model->SetId(model_id);
davinci_model->SetDeviceId(GetContext().DeviceId());

const DumpProperties &dump_properties = DumpManager::GetInstance().GetDumpProperties(GetContext().SessionId());
davinci_model->SetDumpProperties(dump_properties);
dump_properties_ = dump_properties;

auto root_graph = ge_root_model->GetRootGraph();
GE_CHECK_NOTNULL(root_graph);
string root_model_name = root_graph->GetName();
@@ -364,15 +414,23 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::Ge
/// In multi-threaded inference, using the same session_id among multiple threads may cause some threads to fail.
/// These session_ids come from the same model, so the values of session_id are the same.
/// Update session_id for infer in load model to avoid the same session_id.
if (!ge_root_model->GetTrainFlag()) {
uint64_t new_session_id;
ret = GenSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Generate session_id for infer failed.");
ret = davinci_model->UpdateSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Update session_id for infer failed.");
ge_model->InsertSessionMap(model_id, new_session_id);
GELOGD("Update new session id: %lu.", new_session_id);
uint64_t session_id = GetContext().SessionId();
// Inference graph with variable node is not supported in the multi-thread scenario
if (!ge_root_model->GetTrainFlag() && !HasVarNode(root_graph)) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(UpdateSessionId(model_id, ge_model, davinci_model, session_id) != SUCCESS,
return ret,
"UpdateSessionId failed.");
GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId()));
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(InitDumPropertiesWithNewSessionId(session_id) != SUCCESS,
GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId())));
return ret,
"Init DumProperties with new session_id failed.");
}

const DumpProperties &dump_properties = DumpManager::GetInstance().GetDumpProperties(session_id);
davinci_model->SetDumpProperties(dump_properties);
dump_properties_ = dump_properties;

GE_TIMESTAMP_START(Init);
GE_IF_BOOL_EXEC(SUCCESS != (ret = davinci_model->Init()), GELOGW("DavinciInit failed."); break;);
GE_TIMESTAMP_END(Init, "GraphLoader::ModelInit");
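The rewritten block moves the inline session-id regeneration into UpdateSessionId and skips it when the inference graph contains VARIABLE nodes, which cannot be shared across threads. The point of minting a new id per load is simply to keep concurrently loaded inference models from colliding on one session id; a rough stand-in for the id generator, assuming a process-wide counter rather than GE's real GenSessionId:

#include <atomic>
#include <cstdint>

// Assumption: a monotonically increasing counter is enough to keep
// concurrently loaded inference models from sharing a session id.
static std::atomic<uint64_t> g_next_session_id{1};

uint64_t GenSessionId() {
  return g_next_session_id.fetch_add(1, std::memory_order_relaxed);
}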
@@ -398,26 +456,32 @@ void ModelManager::InsertModel(uint32_t model_id, shared_ptr<hybrid::HybridDavin
}

Status ModelManager::DeleteModel(uint32_t id) {
std::lock_guard<std::recursive_mutex> lock(map_mutex_);
// These two pointers are used to decouple erase() from the model destruction process.
std::shared_ptr<DavinciModel> tmp_model;
std::shared_ptr<hybrid::HybridDavinciModel> tmp_hybrid_model;
{
std::lock_guard<std::recursive_mutex> lock(map_mutex_);

auto it = model_map_.find(id);
auto hybrid_model_it = hybrid_model_map_.find(id);
if (it != model_map_.end()) {
uint64_t session_id = it->second->GetSessionId();
std::string model_key = std::to_string(session_id) + "_" + std::to_string(id) + "_" +
std::to_string(it->second->SubModelId());
auto iter_aicpu_kernel = model_aicpu_kernel_.find(model_key);
if (iter_aicpu_kernel != model_aicpu_kernel_.end()) {
(void)model_aicpu_kernel_.erase(iter_aicpu_kernel);
auto it = model_map_.find(id);
auto hybrid_model_it = hybrid_model_map_.find(id);
if (it != model_map_.end()) {
uint64_t session_id = it->second->GetSessionId();
std::string model_key = std::to_string(session_id) + "_" + std::to_string(id) + "_" +
std::to_string(it->second->SubModelId());
auto iter_aicpu_kernel = model_aicpu_kernel_.find(model_key);
if (iter_aicpu_kernel != model_aicpu_kernel_.end()) {
(void)model_aicpu_kernel_.erase(iter_aicpu_kernel);
}
tmp_model = it->second;
(void)model_map_.erase(it);
} else if (hybrid_model_it != hybrid_model_map_.end()) {
tmp_hybrid_model = hybrid_model_it->second;
(void)hybrid_model_map_.erase(hybrid_model_it);
} else {
REPORT_INNER_ERROR("E19999", "model_id:%u not exist in model_map, check invalid", id);
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "model id %u does not exists.", id);
return ACL_ERROR_GE_EXEC_MODEL_ID_INVALID;
}
(void)model_map_.erase(it);
} else if (hybrid_model_it != hybrid_model_map_.end()) {
(void)hybrid_model_map_.erase(hybrid_model_it);
} else {
REPORT_INNER_ERROR("E19999", "model_id:%u not exist in model_map, check invalid",
id);
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "model id %u does not exists.", id);
return ACL_ERROR_GE_EXEC_MODEL_ID_INVALID;
}

return SUCCESS;
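The reworked DeleteModel keeps tmp_model / tmp_hybrid_model alive outside the locked scope, so that erasing the map entry (cheap, under map_mutex_) is decoupled from destroying the model (potentially heavy). The idiom, reduced to generic types:

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

struct Model { ~Model() { /* heavy teardown, must not run under the map mutex */ } };

class Manager {
 public:
  bool Remove(uint32_t id) {
    std::shared_ptr<Model> victim;        // keeps the object alive past the lock
    {
      std::lock_guard<std::mutex> lock(mutex_);
      auto it = models_.find(id);
      if (it == models_.end()) {
        return false;
      }
      victim = it->second;                // transfer ownership out of the map
      models_.erase(it);                  // cheap, done under the lock
    }                                     // mutex released here
    return true;                          // ~Model() runs when 'victim' goes out of scope
  }

 private:
  std::mutex mutex_;
  std::map<uint32_t, std::shared_ptr<Model>> models_;
};

The destructor only runs after the lock_guard has been released, so teardown work cannot block or re-enter other map operations.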
@@ -542,7 +606,7 @@ Status ModelManager::GetCurDynamicDims(const vector<vector<int64_t>> &user_real_
/// @brief load Input and output TensorInfo for Model
/// @return Status run result
///
Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector<InputTensorInfo> &inputs) {
Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector<ge::Tensor> &inputs) {
std::shared_ptr<DavinciModel> model = GetModel(model_id);
auto hybrid_model = GetHybridModel(model_id);
if (hybrid_model == nullptr) {
@@ -556,9 +620,11 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector<InputT
input_data.index = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
DataBuffer data;
data.data = inputs[i].data;
data.length = inputs[i].length;
input_data.shapes.emplace_back(inputs[i].dims);
const TensorDesc &tensor_desc = inputs[i].GetTensorDesc();
data.data = reinterpret_cast<void *>(const_cast<uint8_t *>(inputs[i].GetData()));
data.length = inputs[i].GetSize();
data.placement = static_cast<uint32_t>(tensor_desc.GetPlacement());
input_data.shapes.emplace_back(tensor_desc.GetShape().GetDims());
input_data.blobs.push_back(data);
}
if (!GetLocalOmgContext().user_input_dims.empty() && GetLocalOmgContext().need_multi_batch) {
@@ -608,7 +674,6 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector<InputT

return SUCCESS;
}
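With the signature change from InputTensorInfo to ge::Tensor, the loop now pulls the blob fields out of the tensor itself: raw data pointer, byte size, placement, and the shape dims from the tensor descriptor. A simplified repacking sketch; Tensor, DataBuffer and InputData here are illustrative stand-ins, not the real GE classes:

#include <cstdint>
#include <vector>

// Illustrative stand-ins for ge::Tensor and the runtime DataBuffer.
struct Tensor {
  const uint8_t *data = nullptr;
  size_t size = 0;
  uint32_t placement = 0;               // host vs. device memory
  std::vector<int64_t> dims;
};
struct DataBuffer { void *data; uint64_t length; uint32_t placement; };
struct InputData {
  std::vector<DataBuffer> blobs;
  std::vector<std::vector<int64_t>> shapes;
};

InputData PackInputs(const std::vector<Tensor> &inputs) {
  InputData input_data;
  for (const Tensor &t : inputs) {
    DataBuffer buf;
    buf.data = const_cast<uint8_t *>(t.data);   // runtime side takes a non-const pointer
    buf.length = t.size;
    buf.placement = t.placement;
    input_data.shapes.emplace_back(t.dims);
    input_data.blobs.push_back(buf);
  }
  return input_data;
}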

///
/// @ingroup domi_ome
/// @brief create model thread, start to execute model
@@ -1045,6 +1110,21 @@ Status ModelManager::GetCurShape(const uint32_t model_id, std::vector<int64_t> &
return SUCCESS;
}

Status ModelManager::GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name,
std::string &attr_value) {
auto davinci_model = GetModel(model_id);
if (davinci_model != nullptr) {
return davinci_model->GetOpAttr(op_name, attr_name, attr_value);
}
std::shared_ptr<hybrid::HybridDavinciModel> hybrid_davinci_model = GetHybridModel(model_id);
if (hybrid_davinci_model != nullptr) {
return hybrid_davinci_model->GetOpAttr(op_name, attr_name, attr_value);
}
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "[Get][Model]Get model failed, invalid model id:%u.", model_id);
REPORT_INNER_ERROR("E19999", "Get model failed, invalid model id:%u.", model_id);
return ACL_ERROR_GE_EXEC_MODEL_ID_INVALID;
}
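GetOpAttr uses the same two-step lookup as the other per-model accessors: try the ordinary model map, fall back to the hybrid map, and only report an invalid model id if neither knows the model. The shape of that lookup with plain types:

#include <cstdint>
#include <map>
#include <memory>
#include <string>

struct Model { std::map<std::string, std::string> attrs; };

class Registry {
 public:
  // Fills 'value' if either map knows the model and the attribute.
  bool GetOpAttr(uint32_t model_id, const std::string &name, std::string &value) const {
    const std::map<uint32_t, std::shared_ptr<Model>> *maps[] = {&models_, &hybrid_models_};
    for (const auto *m : maps) {
      auto it = m->find(model_id);
      if (it == m->end()) continue;      // not in this map, try the next one
      auto attr = it->second->attrs.find(name);
      if (attr == it->second->attrs.end()) return false;
      value = attr->second;
      return true;
    }
    return false;                        // neither map knows this model id
  }

 private:
  std::map<uint32_t, std::shared_ptr<Model>> models_;
  std::map<uint32_t, std::shared_ptr<Model>> hybrid_models_;
};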

Status ModelManager::GetModelAttr(uint32_t model_id, std::vector<string> &dynamic_output_shape_info) {
std::shared_ptr<hybrid::HybridDavinciModel> hybrid_davinci_model = GetHybridModel(model_id);
if (hybrid_davinci_model != nullptr) {


+15 -1 ge/graph/load/model_manager/model_manager.h

@@ -122,7 +122,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
///
ge::Status DataInput(const InputData &input_data, OutputData &output_data);

ge::Status DataInputTensor(uint32_t model_id, const std::vector<InputTensorInfo> &inputs);
ge::Status DataInputTensor(uint32_t model_id, const std::vector<ge::Tensor> &inputs);

///
/// @ingroup domi_ome
@@ -246,6 +246,9 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {

ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);

ge::Status GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name,
std::string &attr_value);

ge::Status GetModelAttr(uint32_t model_id, std::vector<string> &dynamic_output_shape_info);

ge::Status SetDynamicSize(uint32_t model_id, const std::vector<uint64_t> &batch_num, int32_t dynamic_type);
@@ -342,6 +345,16 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {

void GenModelId(uint32_t *id);

Status InitDumPropertiesWithNewSessionId(uint64_t session_id);

bool IsDumpSeverInited(uint64_t session_id);

Status AddDumpProperties(uint64_t session_id, const DumpProperties &dump_properties);

Status UpdateSessionId(uint32_t model_id, GeModelPtr ge_model,
std::shared_ptr<DavinciModel> &davinci_model, uint64_t &session_id);

bool HasVarNode(ComputeGraphPtr &compute_graph) const;

std::map<uint32_t, std::shared_ptr<DavinciModel>> model_map_;
std::map<uint32_t, std::shared_ptr<hybrid::HybridDavinciModel>> hybrid_model_map_;
@@ -358,6 +371,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {

static DumpProperties dump_properties_;
bool dump_exception_flag_ = false;
std::map<uint64_t, bool> session_id_to_dump_server_init_flag_;
};
} // namespace ge



+16 -3 ge/graph/load/model_manager/model_utils.cc

@@ -21,6 +21,7 @@
#include "graph/utils/tensor_utils.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/types.h"
#include "graph/build/memory/block_mem_assigner.h"

#define VALIDATE_MEM_RANGE(OP, SIZE, OFFSET) \
do { \
@@ -514,10 +515,16 @@ vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param
bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, TVM_ATTR_NAME_WORKSPACE_TYPE, v_memory_type);
bool has_mem_type_workspace =
ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_TYPE_LIST, workspace_memory_type);

vector<int32_t> workspace_no_reuse_scope;
bool has_workspace_no_reuse_scope =
ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE, workspace_no_reuse_scope);

for (size_t i = 0; i < v_workspace_bytes.size(); ++i) {
// Temporary solution, the aicpu workspace of multiple images cannot be shared.
if (has_workspace_reuse && i < workspace_reuse_flag.size() && !workspace_reuse_flag[i] &&
!model_param.is_single_op) {
bool aicpu_work_space = (has_workspace_reuse && i < workspace_reuse_flag.size() && !workspace_reuse_flag[i] &&
!model_param.is_single_op);
if (aicpu_work_space) {
void *mem_addr = model_param.aicpu_mem_mall->Acquire(v_workspace_offset[i], v_workspace_bytes[i]);
v_workspace_data_addr.push_back(mem_addr);
GELOGI(
@@ -548,7 +555,13 @@ vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param
model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i]);
} else {
VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_workspace_offset[i]);
uint8_t *mem_addr = model_param.mem_base + v_workspace_offset[i];
uint8_t *mem_addr = nullptr;
bool session_scope_memory = (has_workspace_no_reuse_scope) && (i < workspace_no_reuse_scope.size());
if (session_scope_memory) {
mem_addr = model_param.memory_infos.at(kSessionScopeMemory | RT_MEMORY_HBM).memory_base + v_workspace_offset[i];
} else {
mem_addr = model_param.mem_base + v_workspace_offset[i];
}
v_workspace_data_addr.push_back(mem_addr);
GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] memaddr[%p]",
model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i],
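The new branch adds a third workspace source: besides the aicpu memory mall and the model's main memory base, a workspace tagged with ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE resolves its offset against the session-scope memory block. A reduced decision sketch (the pools are bare byte pointers here, not the RuntimeParam layout):

#include <cstdint>
#include <vector>

struct Pools {
  uint8_t *model_mem_base = nullptr;          // per-model feature-map memory
  uint8_t *session_scope_base = nullptr;      // memory shared across the session
};

// Pick the base for workspace i, mirroring the no-reuse-scope decision.
uint8_t *WorkspaceAddr(const Pools &pools, int64_t offset,
                       const std::vector<int32_t> &no_reuse_scope, size_t i) {
  bool session_scope = !no_reuse_scope.empty() && i < no_reuse_scope.size();
  uint8_t *base = session_scope ? pools.session_scope_base : pools.model_mem_base;
  return base + offset;
}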


+6 -7 ge/graph/load/model_manager/task_info/end_graph_task_info.cc

@@ -28,13 +28,13 @@ Status EndGraphTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GELOGI("InitEndGraphTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}
davinci_model_ = davinci_model;
Status ret = SetStream(task_def.stream_id(), davinci_model->GetStreamList());
if (ret != SUCCESS) {
GELOGE(ret, "SetStream fail, stream_id:%u", task_def.stream_id());
GELOGE(ret, "[Set][Stream] fail, stream_id:%u", task_def.stream_id());
return ret;
}

@@ -51,7 +51,7 @@ Status EndGraphTaskInfo::Distribute() {
rtError_t rt_ret = rtEndGraphEx(model_, stream_, kDumpFlag);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEndGraphEx failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rtEndGraphEx failed, ret: 0x%x", rt_ret);
GELOGE(RT_FAILED, "[Call][RtEndGraphEx] failed, ret:0x%x", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
} else {
@@ -59,7 +59,7 @@ Status EndGraphTaskInfo::Distribute() {
rtError_t rt_ret = rtEndGraph(model_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEndGraph failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rtEndGraph failed, ret: 0x%x", rt_ret);
GELOGE(RT_FAILED, "[Call][RtEndGraph] failed, ret:0x%x", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -68,9 +68,8 @@ Status EndGraphTaskInfo::Distribute() {
uint32_t stream_id = 0;
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelGetTaskId] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
task_id_ = task_id;


+4 -5 ge/graph/load/model_manager/task_info/event_record_task_info.cc

@@ -24,7 +24,7 @@ Status EventRecordTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
GELOGI("EventRecordTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -37,7 +37,7 @@ Status EventRecordTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
if (task_def.event_id() >= eventList.size()) {
REPORT_INNER_ERROR("E19999", "Task event_id:%u > model event size:%zu, check invalid",
task_def.event_id(), eventList.size());
GELOGE(INTERNAL_ERROR, "event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
GELOGE(INTERNAL_ERROR, "[Check][Param] event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
return INTERNAL_ERROR;
}

@@ -50,9 +50,8 @@ Status EventRecordTaskInfo::Distribute() {
GELOGI("EventRecordTaskInfo Distribute Start.");
rtError_t rt_ret = rtEventRecord(event_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEventRecord failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtEventRecord failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtEventRecord] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+6 -8 ge/graph/load/model_manager/task_info/event_wait_task_info.cc

@@ -24,7 +24,7 @@ Status EventWaitTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davi
GELOGI("EventWaitTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -37,7 +37,7 @@ Status EventWaitTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davi
if (task_def.event_id() >= eventList.size()) {
REPORT_INNER_ERROR("E19999", "Task event_id:%u > model event size:%zu, check invalid",
task_def.event_id(), eventList.size());
GELOGE(INTERNAL_ERROR, "event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
GELOGE(INTERNAL_ERROR, "[Check][Param] event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
return INTERNAL_ERROR;
}

@@ -51,17 +51,15 @@ Status EventWaitTaskInfo::Distribute() {
GELOGI("EventWaitTaskInfo Distribute Start.");
rtError_t rt_ret = rtStreamWaitEvent(stream_, event_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtStreamWaitEvent] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtEventReset(event_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtEventReset] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+3 -4 ge/graph/load/model_manager/task_info/fusion_start_task_info.cc

@@ -24,7 +24,7 @@ Status FusionStartTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
GELOGI("FusionStartTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -40,9 +40,8 @@ Status FusionStartTaskInfo::Distribute() {
GELOGI("FusionStartTaskInfo Distribute Start.");
rtError_t rt_ret = rtKernelFusionStart(stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelFusionStart failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtKernelFusionStart failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelFusionStart] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+2 -2 ge/graph/load/model_manager/task_info/fusion_stop_task_info.cc

@@ -24,7 +24,7 @@ Status FusionStopTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *dav
GELOGI("FusionStopTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -41,7 +41,7 @@ Status FusionStopTaskInfo::Distribute() {
rtError_t rt_ret = rtKernelFusionEnd(stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelFusionEnd failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelFusionEnd] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+27 -24 ge/graph/load/model_manager/task_info/hccl_task_info.cc

@@ -31,7 +31,7 @@ HcclTaskInfo::~HcclTaskInfo() {
rtError_t ret = rtFreeHost(private_def_);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost failed, ret:0x%X", ret);
GELOGE(RT_FAILED, "Call rtFree Fail, ret = 0x%X.", ret);
GELOGE(RT_FAILED, "[Call][RtFree] Fail, ret = 0x%X.", ret);
}
private_def_ = nullptr;
}
@@ -43,7 +43,7 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
GELOGI("HcclTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}
davinci_model_ = davinci_model;
@@ -71,21 +71,21 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHorovodInputs fail for op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
GELOGE(ret, "[Get][HorovodInputs] fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return ret;
}
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHcclDataType fail for op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
GELOGE(dmrt, "[Get][HcomDataType] fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return dmrt;
}
dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHcclCount fail for op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
GELOGE(dmrt, "[Get][HcomCount] fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return dmrt;
}
// Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
@@ -93,14 +93,14 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetAllRootId fail for op:%s(%s)",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
GELOGE(dmrt, "[Get][RootId] fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return dmrt;
}

// GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
ret = SetFollowStream(op_desc, davinci_model);
if (ret != SUCCESS) {
GELOGE(ret, "SetStream Fail.");
GELOGE(ret, "[Set][Stream] Fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return ret;
}

@@ -111,13 +111,13 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m

ret = SetAddrs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "Setaddrs Fail.");
GELOGE(ret, "[Set][Addrs] Fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return ret;
}
// GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
ret = SetWorkspace(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "SetWorkspace Fail.");
GELOGE(ret, "[Set][Workspace] Fail for op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return ret;
}

@@ -156,7 +156,8 @@ Status HcclTaskInfo::SetFollowStream(const ge::ConstOpDescPtr &op_desc, DavinciM
}
ret = CreateStream(hccl_stream_num - created_stream_num, davinci_model, main_stream_id);
if (ret != SUCCESS) {
GELOGE(RT_FAILED, "Create hccl stream failed.");
GELOGE(RT_FAILED, "[Create][Stream] for %s failed, stream id:%ld, stream num:%ld.",
op_desc->GetName().c_str(), main_stream_id, hccl_stream_num - created_stream_num);
return RT_ERROR_TO_GE_STATUS(ret);
}
}
@@ -165,7 +166,8 @@ Status HcclTaskInfo::SetFollowStream(const ge::ConstOpDescPtr &op_desc, DavinciM
GELOGI("need to create follow stream for %s with new mainstream %ld.", op_desc->GetName().c_str(), main_stream_id);
ret = CreateStream(hccl_stream_num, davinci_model, main_stream_id);
if (ret != SUCCESS) {
GELOGE(RT_FAILED, "Create hccl stream failed.");
GELOGE(RT_FAILED, "[Create][Stream] for %s failed, stream id:%ld, stream num:%ld.",
op_desc->GetName().c_str(), main_stream_id, hccl_stream_num);
return RT_ERROR_TO_GE_STATUS(ret);
}
}
@@ -181,7 +183,8 @@ Status HcclTaskInfo::CreateStream(int64_t stream_num, DavinciModel *davinci_mode
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamCreateWithFlags failed, ret:0x%X, stream_idx:%ld, stream_num:%ld",
rt_ret, i, stream_num);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtStreamCreateWithFlags] failed, ret:0x%X, stream_idx:%ld, stream_num:%ld",
rt_ret, i, stream_num);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
// Create slave stream, inactive by default, activated by hccl
@@ -189,7 +192,8 @@ Status HcclTaskInfo::CreateStream(int64_t stream_num, DavinciModel *davinci_mode
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelBindStream failed, ret:0x%X, stream_idx:%ld, stream_num:%ld",
rt_ret, i, stream_num);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelBindStream] failed, ret:0x%X, stream_idx:%ld, stream_num:%ld",
rt_ret, i, stream_num);
(void)rtStreamDestroy(stream);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
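CreateStream implements the "hccl declares, GE provides" contract: for each requested follow stream, create it inactive, bind it to the model, and destroy the stream again if the bind fails so the handle does not leak. A schematic loop with placeholder handles (StreamCreate/ModelBindStream/StreamDestroy stand in for the rt* calls and are faked so the snippet runs):

#include <cstdint>
#include <vector>

// Placeholder handles and calls; each returns 0 on success.
using Stream = void *;
int StreamCreate(Stream *s) { *s = new int(0); return 0; }
int ModelBindStream(void * /*model*/, Stream /*s*/) { return 0; }
int StreamDestroy(Stream s) { delete static_cast<int *>(s); return 0; }

// Create 'count' follow streams for one model; on a bind failure, release the
// just-created stream before reporting the error so no handle leaks.
int CreateFollowStreams(void *model, int64_t count, std::vector<Stream> &out) {
  for (int64_t i = 0; i < count; ++i) {
    Stream s = nullptr;
    if (StreamCreate(&s) != 0) return -1;
    if (ModelBindStream(model, s) != 0) {
      (void)StreamDestroy(s);
      return -1;
    }
    out.push_back(s);
  }
  return 0;
}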
@@ -207,7 +211,7 @@ Status HcclTaskInfo::Distribute() {
GELOGI("HcclTaskInfo Distribute Start. begin to call function LoadTask in hccl.");
if (ops_kernel_store_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param ops_kernel_store_ nullptr");
GELOGE(INTERNAL_ERROR, "ops kernel store is null.");
GELOGE(INTERNAL_ERROR, "[Check][Param] ops kernel store is null.");
return INTERNAL_ERROR;
}
OpsKernelInfoStore *ops_kernel_info_store = reinterpret_cast<OpsKernelInfoStore *>(ops_kernel_store_);
@@ -217,7 +221,7 @@ Status HcclTaskInfo::Distribute() {
auto result = ops_kernel_info_store->LoadTask(ge_task);
if (result != HCCL_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call ops_kernel_info_store LoadTask fail");
GELOGE(INTERNAL_ERROR, "davinci_model : load task fail, return ret: %u", result);
GELOGE(INTERNAL_ERROR, "[Load][Task] fail, return ret:%u", result);
return INTERNAL_ERROR;
}
GELOGI("HcclTaskInfo Distribute Success.");
@@ -265,8 +269,9 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
GE_CHECK_NOTNULL(op_desc);
GE_CHK_STATUS_RET(HcomOmeUtil::CheckKernelHcclInfo(op_desc, kernel_hccl_infos),
"HcomOmeUtil:: the number of GETaskKernelHcclInfo is invalid.");
GELOGI("Set hccl task input output address, node[%s}, type[%s] kernel_hccl_infos.size[%zu].",
"[Check][Param] HcomOmeUtil:: the number of GETaskKernelHcclInfo is invalid, node:%s(%s).",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGI("Set hccl task input output address, node[%s], type[%s] kernel_hccl_infos.size[%zu].",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), kernel_hccl_infos.size());
if (op_desc->GetType() == HVDWAIT) {
return SUCCESS;
@@ -300,7 +305,7 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
} else if (hccl_type == HCOMALLREDUCE ||
hccl_type == HCOMREDUCESCATTER || hccl_type == HVDCALLBACKALLREDUCE || hccl_type == HCOMREDUCE) {
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclOperationType(op_desc, op_type),
"davinci_model: GetHcomOperationType fail!");
"[Get][HcomOperationType] fail! op:%s", op_desc->GetName().c_str());
kernel_hccl_infos[i].outputDataAddr = output_data_addr;
kernel_hccl_infos[i].opType = op_type;
}
@@ -332,18 +337,16 @@ void HcclTaskInfo::GetPrivateDefByTaskDef(const domi::TaskDef &task) {
private_def_len_ = private_def_temp.size();
rtError_t ret = rtMallocHost(&private_def_, private_def_len_);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMallocHost failed, ret:0x%X, size:%u",
ret, private_def_len_);
GELOGE(RT_FAILED, "Call rtMallocHost Fail, ret = 0x%X.", ret);
REPORT_CALL_ERROR("E19999", "Call rtMallocHost failed, ret:0x%X, size:%u", ret, private_def_len_);
GELOGE(RT_FAILED, "[Call][RtMallocHost] Fail, ret:0x%X, size:%u", ret, private_def_len_);
return;
}

ret = rtMemcpy(private_def_, private_def_len_, task.private_def().c_str(), private_def_len_,
RT_MEMCPY_HOST_TO_HOST);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%u",
ret, private_def_len_);
GELOGE(RT_FAILED, "Call rtMemcpy Fail, ret = 0x%X.", ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%u", ret, private_def_len_);
GELOGE(RT_FAILED, "[Call][RtMemcpy] Fail, ret:0x%X, size:%u", ret, private_def_len_);
return;
}
GELOGI("The first address of the custom info, privateDef=%p.", private_def_);


+70 -64 ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc

@@ -48,11 +48,12 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
num_inputs,
num_outputs,
unknown_type));
GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!");
GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "[Malloc][Memory] for aicpu_ext_handle failed!");
GE_CHK_STATUS_RET(ext_handle->Parse(ext_info),
"Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size());
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
"[Parse][KernelExtInfo] failed, kernel_ext_info_size=%zu.", ext_info.size());
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "[Update][ExecuteMode] failed.");
GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");
topic_type_flag_ = ext_handle->GetTopicTypeFlag();

bool all_shape = false;
(void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape);
@@ -62,29 +63,30 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
auto input_desc = op_desc->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateInputShapeAndType(i, *input_desc),
"Input[%u] update input shape failed.", i);
"[Call][UpdateInputShapeAndType] Input[%u] update input shape failed, op:%s.",
i, op_desc->GetName().c_str());
}
if (unknown_type != DEPEND_COMPUTE) {
for (uint32_t j = 0; j < num_outputs; j++) {
auto output_desc = op_desc->MutableOutputDesc(j);
GE_CHECK_NOTNULL(output_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateOutputShapeAndType(j, *output_desc),
"Output[%u] update output shape failed.", j);
"[Call][UpdateOutputShapeAndType] Output[%u] update output shape failed, op:%s.",
j, op_desc->GetName().c_str());
}
}
}
auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
ext_info.size(), rt_ret);
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", ext_info.size(), rt_ret);
GELOGE(RT_FAILED, "[RtMalloc][ExtInfo] error:0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
ext_handle->GetExtInfoLen(), rt_ret);
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
GELOGE(RT_FAILED, "[RtMemcpy][ExtInfo] error:0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
return SUCCESS;
}
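InitTaskExtInfo finishes by mirroring the parsed ext-info blob into device memory: allocate HBM, copy host-to-device, and fail the task init if either call reports an error. A bare-bones version of that step; DeviceMalloc/DeviceCopy are host-side stand-ins so the sketch compiles, while the real code goes through rtMalloc and rtMemcpy:

#include <cstdlib>
#include <cstring>
#include <string>

// Host-side stand-ins for the device runtime, used only so the sketch runs.
static int DeviceMalloc(void **dev_ptr, size_t size) {
  *dev_ptr = std::malloc(size);
  return *dev_ptr != nullptr ? 0 : -1;
}
static int DeviceCopy(void *dst, size_t dst_len, const void *src, size_t src_len) {
  if (src_len > dst_len) return -1;
  std::memcpy(dst, src, src_len);
  return 0;
}

// Mirror of the ext-info upload: allocate, copy, fail early on either step.
int CopyExtInfoToDevice(const std::string &ext_info, void *&dev_addr) {
  if (DeviceMalloc(&dev_addr, ext_info.size()) != 0) return -1;   // allocation failed
  if (DeviceCopy(dev_addr, ext_info.size(), ext_info.data(), ext_info.size()) != 0) {
    return -1;                                                    // copy failed
  }
  return 0;
}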
@@ -105,9 +107,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
uint32_t op_index = kernel_ex_def.op_index();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
op_index);
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", op_index);
GELOGE(INTERNAL_ERROR, "[Get][Op] by index failed, index:%u is out of range!", op_index);
return INTERNAL_ERROR;
}

@@ -116,22 +117,22 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
if (sizeof(STR_FWK_OP_KERNEL) < kernel_ex_def.args_size()) {
REPORT_INNER_ERROR("E19999", "Param kernel_ex_def.args_size():%u > sizeof(STR_FWK_OP_KERNEL):%zu, "
"check invalid", kernel_ex_def.args_size(), sizeof(STR_FWK_OP_KERNEL));
GELOGE(FAILED, "sizeof STR_FWK_OP_KERNEL is: %zu, but args_size is: %u", sizeof(STR_FWK_OP_KERNEL),
GELOGE(FAILED, "[Check][Param] sizeof STR_FWK_OP_KERNEL is: %zu, but args_size is: %u", sizeof(STR_FWK_OP_KERNEL),
kernel_ex_def.args_size());
return FAILED;
}
errno_t sec_ret =
memcpy_s(&fwk_op_kernel, sizeof(STR_FWK_OP_KERNEL), kernel_ex_def.args().data(), kernel_ex_def.args_size());
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X",
sizeof(STR_FWK_OP_KERNEL), sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X", sizeof(STR_FWK_OP_KERNEL), sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%zu, ret: %d", sizeof(STR_FWK_OP_KERNEL), sec_ret);
return FAILED;
}

const auto &ext_info = kernel_ex_def.kernel_ext_info();
GE_CHK_STATUS_RET(InitTaskExtInfo(ext_info, op_desc),
"Init aicpu tf_task ext info failed, ext_info size=%zu", ext_info.size());
"[Init][TaskExtInfo] failed, ext_info size=%zu, op:%s",
ext_info.size(), op_desc->GetName().c_str());

GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);
@@ -148,15 +149,15 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
davinci_model->SubModelId(), kernel_id) != SUCCESS,
REPORT_CALL_ERROR("E19999", "CreateAicpuKernel fail, session_id:%lu, model_id:%u, kernel_id:%lu",
session_id, davinci_model->Id(), kernel_id);
GELOGE(FAILED, "CreateAicpuKernel error.");
GELOGE(FAILED, "[Create][AicpuKernel] fail, session_id:%lu, model_id:%u, kernel_id:%lu",
session_id, davinci_model->Id(), kernel_id);
return FAILED;)
// 2.3 Create session
GE_CHECK_NOTNULL(ModelManager::GetInstance());
ret = ModelManager::GetInstance()->CreateAicpuSession(session_id);
GE_IF_BOOL_EXEC(ret != SUCCESS,
REPORT_CALL_ERROR("E19999", "CreateAicpuSession fail, session_id:%lu",
session_id);
GELOGE(ret, "CreateAicpuSession error. session id: %lu", session_id);
REPORT_CALL_ERROR("E19999", "CreateAicpuSession fail, session_id:%lu", session_id);
GELOGE(ret, "[Create][AicpuSession] error. session id:%lu", session_id);
return ret;)

kernel_buf_size_ = sizeof(STR_FWK_OP_KERNEL);
@@ -169,7 +170,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
kernel_ex_def.task_info_size(), rt_ret);
GELOGE(RT_FAILED, "rtMalloc error, ret: Ox%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X",
kernel_ex_def.task_info_size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret););
rt_ret = rtMemcpy(workspace_base_addr, kernel_ex_def.task_info_size(), kernel_ex_def.task_info().data(),
kernel_ex_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE);
@@ -181,17 +183,15 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin

rt_ret = rtMalloc(&kernel_buf_, kernel_buf_size_, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, ret:0x%X, size:%u",
rt_ret, kernel_buf_size_);
GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, ret:0x%X, size:%u", rt_ret, kernel_buf_size_);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, ret:0x%X, size:%u", rt_ret, kernel_buf_size_);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(kernel_buf_, kernel_buf_size_, static_cast<void *>(&fwk_op_kernel), kernel_buf_size_,
RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%u",
rt_ret, kernel_buf_size_);
GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%u", rt_ret, kernel_buf_size_);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, ret:0x%X, size:%u", rt_ret, kernel_buf_size_);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

SetIoAddrs(op_desc);
@@ -203,7 +203,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
// 3. Set workspaceaddr, inputOutputDataAddr
Status ge_ret = CopyTaskInfo(kernel_ex_def, rts_param, op_desc);
if (ge_ret != SUCCESS) {
GELOGE(ge_ret, "copy task info to workspace failed.");
GELOGE(ge_ret, "[Copy][TaskInfo] to workspace failed, op:%s.", op_desc->GetName().c_str());
return ge_ret;
}

@@ -211,7 +211,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
if (workspace_data_addrs.empty()) {
REPORT_CALL_ERROR("E19999", "workspace_data_addrs is empty in op:%s(%s), check invalid",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "workspace_data_addrs is empty.");
GELOGE(FAILED, "[Check][Param] workspace_data_addrs is empty in op:%s(%s).",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

@@ -226,23 +227,17 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
if (addrs_size > 0) {
rtError_t rt_ret = rtMalloc(&input_output_addr_, addrs_size, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, ret:0x%X, size:%lu",
rt_ret, addrs_size);
GELOGE(RT_FAILED, "rtMalloc error, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, ret:0x%X, size:%lu", rt_ret, addrs_size);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, ret:0x%X, size:%lu", rt_ret, addrs_size);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(input_output_addr_, addrs_size, io_addrs.data(), addrs_size, RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%lu",
rt_ret, addrs_size);
GELOGE(RT_FAILED, "rtMemcpy to input_output_addr_ error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%lu", rt_ret, addrs_size);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, ret:0x%X, size:%lu", rt_ret, addrs_size);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

InitDumpTask(input_output_addr_, op_desc);
if (davinci_model_->GetOpDugReg()) {
GELOGI("Op debug is open in kernel ex task info");
dump_args_ = input_output_addr_;
}
}

uint64_t input_output_addr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(input_output_addr_));
@@ -257,7 +252,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, ret:0x%X, size:%zu",
rt_ret, sizeof(STR_FWK_OP_KERNEL));
GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, ret:0x%X, size:%zu", rt_ret, sizeof(STR_FWK_OP_KERNEL));
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(kernel_buf_, sizeof(STR_FWK_OP_KERNEL), static_cast<void *>(&fwk_op_kernel),
@@ -265,7 +260,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%zu",
rt_ret, sizeof(STR_FWK_OP_KERNEL));
GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, ret:0x%X, size:%zu", rt_ret, sizeof(STR_FWK_OP_KERNEL));
return RT_ERROR_TO_GE_STATUS(rt_ret);)

davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
@@ -275,10 +270,15 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
}

void KernelExTaskInfo::InitDumpTask(void *addr, const OpDescPtr &op_desc) {
if (davinci_model_->OpNeedDump(op_desc->GetName())) {
if (davinci_model_->OpNeedDump(op_desc->GetName()) || davinci_model_->GetOpDugReg()) {
GELOGD("Op %s need dump in kernel ex task info", op_desc->GetName().c_str());
dump_flag_ = RT_KERNEL_DUMPFLAG;
dump_args_ = addr;
}
if (davinci_model_->GetOpDugReg()) {
GELOGD("Op debug is open in kernel ex task info");
dump_args_ = addr;
}
}

Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
@@ -286,9 +286,8 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
uint32_t op_index = kernel_ex_def.op_index();
OpDescPtr op_desc = davinci_model->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
op_index);
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", op_index);
GELOGE(INTERNAL_ERROR, "[Get][Op] By Index, index:%u is out of range!", op_index);
return INTERNAL_ERROR;
}
args_offset_ = davinci_model->GetTotalArgsSize();
@@ -308,7 +307,8 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
REPORT_INNER_ERROR("E19999", "The output size[%zu] and output index[%u] in op:%s(%s) are inconsistent, "
"check invalid", outputs_size, output_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "The output size[%zu] and output index[%u] are inconsistent.", outputs_size, output_index);
GELOGE(FAILED, "[Check][Param] The output size[%zu] and output index[%u] in op:%s(%s) are inconsistent.",
outputs_size, output_index, op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}
fixed_addr_offset_ = davinci_model->GetFixedAddrsSize(peer_input_name);
@@ -334,11 +334,12 @@ void KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
if (output_index > output_data_addrs.size()) {
REPORT_INNER_ERROR("E19999", "The output data addr size[%zu] and output index[%u] in op:%s(%s) are inconsistent"
", check invalid", output_data_addrs.size(), output_index,
REPORT_INNER_ERROR("E19999", "The output data addr size[%zu] and output index[%u] in op:%s(%s) "
"are inconsistent, check invalid", output_data_addrs.size(), output_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
output_data_addrs.size(), output_index);
GELOGE(FAILED, "[Check][Param] The output data addr size[%zu] and output index[%u] in op:%s(%s) "
"are inconsistent.", output_data_addrs.size(), output_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return;
}
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
@@ -371,7 +372,7 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace addr:%zu or size:%zu empty, check invalid",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
workspace_data_addrs.size(), workspace_data_sizes.size());
GELOGE(FAILED, "Node:%s invalid workspace, addrs is %zu, size is %zu.", op_desc->GetName().c_str(),
GELOGE(FAILED, "[Check][Param] Node:%s invalid workspace, addrs is %zu, size is %zu.", op_desc->GetName().c_str(),
workspace_data_addrs.size(), workspace_data_sizes.size());
return FAILED;
}
@@ -379,7 +380,7 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
if (workspace_data_addrs[0] == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace addr is nullptr, check invalid",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Node:%s workspace addrs is null.", op_desc->GetName().c_str());
GELOGE(FAILED, "[Check][Param] Node:%s workspace addrs is null.", op_desc->GetName().c_str());
return FAILED;
}

@@ -387,7 +388,7 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace size:%ld < task info size:%d, check invalid",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
workspace_data_sizes[0], kernel_def.task_info_size());
GELOGE(FAILED, "Node:%s workspace size is %ld, task info size is %d.", op_desc->GetName().c_str(),
GELOGE(FAILED, "[Check][Param] Node:%s workspace size is %ld, task info size is %d.", op_desc->GetName().c_str(),
workspace_data_sizes[0], kernel_def.task_info_size());
return FAILED;
}
@@ -395,9 +396,8 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
rtError_t rt_ret = rtMemcpy(workspace_data_addrs[0], kernel_def.task_info_size(), kernel_def.task_info().data(),
kernel_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%d",
rt_ret, kernel_def.task_info_size());
GELOGE(RT_FAILED, "rtMemcpy error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X, size:%d", rt_ret, kernel_def.task_info_size());
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, ret:0x%X, size:%d", rt_ret, kernel_def.task_info_size());
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -406,17 +406,24 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const

Status KernelExTaskInfo::Distribute() {
GELOGI("KernelExTaskInfo Distribute Start.");
// Use the fifth and sixth bits of dump_flag_ to indicate the value of topic_type.
// xxxxxxxx xxxxxxxx xxxxxxxx xx00xxxx: DEVICE_ONLY
// xxxxxxxx xxxxxxxx xxxxxxxx xx01xxxx: DEVICE_FIRST
// xxxxxxxx xxxxxxxx xxxxxxxx xx10xxxx: HOST_ONLY
// xxxxxxxx xxxxxxxx xxxxxxxx xx11xxxx: HOST_FIRST
if (topic_type_flag_ > 0) {
dump_flag_ = dump_flag_ | topic_type_flag_;
}
rtError_t rt_ret = rtKernelLaunchEx(kernel_buf_, kernel_buf_size_, dump_flag_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchEx failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchEx failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelLaunchEx] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model_ is null.");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model_ is null.");
return PARAM_INVALID;
}
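The comment block fixes the encoding used here: topic_type occupies bits 4 and 5 of dump_flag_ (00 DEVICE_ONLY, 01 DEVICE_FIRST, 10 HOST_ONLY, 11 HOST_FIRST), and Distribute just ORs the already-shifted topic_type_flag_ into the flag. A small encode/decode sketch under that layout; the enum values are an assumption read off the comment, not taken from a header:

#include <cstdint>

// Bit layout taken from the comment: topic type lives in bits 4-5 of the dump flag.
enum class TopicType : uint32_t { DEVICE_ONLY = 0, DEVICE_FIRST = 1, HOST_ONLY = 2, HOST_FIRST = 3 };

constexpr uint32_t kTopicShift = 4;
constexpr uint32_t kTopicMask  = 0x3u << kTopicShift;   // xx11 0000 in the low byte

uint32_t EncodeTopic(uint32_t dump_flag, TopicType type) {
  return (dump_flag & ~kTopicMask) | (static_cast<uint32_t>(type) << kTopicShift);
}

TopicType DecodeTopic(uint32_t dump_flag) {
  return static_cast<TopicType>((dump_flag & kTopicMask) >> kTopicShift);
}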

@@ -424,9 +431,8 @@ Status KernelExTaskInfo::Distribute() {
uint32_t stream_id = 0; // for profiling
rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelGetTaskId] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
task_id_ = task_id;


+1 -0 ge/graph/load/model_manager/task_info/kernel_ex_task_info.h

@@ -76,6 +76,7 @@ class KernelExTaskInfo : public TaskInfo {
vector<void *> io_addrs_;
uint32_t args_offset_ = 0;
int64_t fixed_addr_offset_ = 0;
int32_t topic_type_flag_ = -1;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_EX_TASK_INFO_H_

+183 -191 ge/graph/load/model_manager/task_info/kernel_task_info.cc

@@ -95,11 +95,10 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
rtError_t rt_ret = rtGetFunctionByName(const_cast<char *>(kernel_def.stub_func().c_str()), &stub_func_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName failed for op:%s(%s), "
"bin_file_key:%s, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
kernel_def.stub_func().c_str(), rt_ret);
GELOGE(RT_FAILED, "execute rtGetFunctionByName failed. stub_func: %s",
kernel_def.stub_func().c_str());
"bin_file_key:%s, ret:0x%X", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), kernel_def.stub_func().c_str(), rt_ret);
GELOGE(RT_FAILED, "[Execute][RtGetFunctionByName] failed for op:%s(%s). stub_func:%s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), kernel_def.stub_func().c_str());
return RT_ERROR_TO_GE_STATUS(rt_ret););
} else if (kernel_type_ == ccKernelType::TE) {
// get bin_file_key
@@ -109,18 +108,20 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
rtError_t rt_ret = rtGetFunctionByName(bin_file_key, &stub_func_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName failed for op:%s(%s), "
"bin_file_key:%s, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
bin_file_key, rt_ret);
GELOGE(RT_FAILED, "execute rtGetFunctionByName failed. bin_file_key: %s", bin_file_key);
"bin_file_key:%s, ret:0x%X", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), bin_file_key, rt_ret);
GELOGE(RT_FAILED, "[Execute][RtGetFunctionByName] failed for op:%s(%s), bin_file_key:%s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), bin_file_key);
return RT_ERROR_TO_GE_STATUS(rt_ret););
}

if (context.origin_op_index_size() > CC_FUSION_OP_MAX) {
REPORT_INNER_ERROR("E19999", "context.origin_op_index_size():%d is more than CC_FUSION_OP_MAX(%d), op:%s(%s) ,"
REPORT_INNER_ERROR("E19999", "context.origin_op_index_size():%d is more than CC_FUSION_OP_MAX(%d), op:%s(%s), "
"check invalid", context.origin_op_index_size(), CC_FUSION_OP_MAX,
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
GELOGE(PARAM_INVALID, "context.origin_op_index_size() is more than CC_FUSION_OP_MAX(%d)", CC_FUSION_OP_MAX);
GELOGE(PARAM_INVALID, "[Check][Param] context.origin_op_index_size():%d is more than CC_FUSION_OP_MAX(%d), "
"op:%s(%s)", context.origin_op_index_size(), CC_FUSION_OP_MAX,
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
return PARAM_INVALID;
}

@@ -132,10 +133,11 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
ctx_.opIndex = context.op_index();
uint16_t *args_offset_tmp = reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data()));
if (context.args_offset().size() / sizeof(uint16_t) < 1) {
REPORT_INNER_ERROR("E19999", "context.args_offset().size():%zu / sizeof(uint16_t) less than 1, op:%s(%s) ,"
REPORT_INNER_ERROR("E19999", "context.args_offset().size():%zu / sizeof(uint16_t) less than 1, op:%s(%s), "
"check invalid", context.args_offset().size(),
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
GELOGE(FAILED, "context.args_offset().size() / sizeof(uint16_t) less than 1");
GELOGE(FAILED, "[Check][Param] context.args_offset().size() / sizeof(uint16_t) less than 1, op:%s(%s)",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
return FAILED;
}

@@ -149,7 +151,8 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
if (kernel_def.args().empty() || args_size_ == 0) {
REPORT_INNER_ERROR("E19999", "kernel_def.args() is empty, op:%s(%s), check invalid",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
GELOGE(FAILED, "args is null.");
GELOGE(FAILED, "[Check][Param] args is empty, op:%s(%s)",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str());
return FAILED;
}
ret = InitCceTask(kernel_def);
@@ -181,9 +184,8 @@ void KernelTaskInfo::UpdateSKTTaskId() {
if (davinci_model_ != nullptr) {
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelGetTaskId] failed, ret:0x%X", rt_ret);
return;
}
SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
@@ -201,9 +203,8 @@ void KernelTaskInfo::UpdateTaskId() {
if (davinci_model_ != nullptr) {
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelGetTaskId] failed, ret:0x%X", rt_ret);
return;
}
task_id_ = task_id;
@@ -214,7 +215,7 @@ void KernelTaskInfo::UpdateTaskId() {

Status KernelTaskInfo::SKTFinalize() {
UpdateSKTTaskId();
GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "[Save][SKTDumpInfo] failed");
GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.kernel_list.clear();
@@ -258,13 +259,12 @@ Status KernelTaskInfo::SuperKernelLaunch() {
static_cast<rtSmDesc_t *>(skt_info.last_sm_desc), skt_info.last_stream,
skt_info.last_dump_flag);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "SuperKernelLaunch: Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelLaunchWithFlag] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
call_save_dump_ = true;
GE_CHK_STATUS_RET(SKTFinalize(), "Skt finalize failed");
GE_CHK_STATUS_RET(SKTFinalize(), "[Call][SKTFinalize] failed");
return SUCCESS;
}
// Create super kernel factory
@@ -272,27 +272,24 @@ Status KernelTaskInfo::SuperKernelLaunch() {
// Init super kernel factory
Status ge_ret = factory->Init();
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory init fail, ret:0x%X",
ge_ret);
GELOGE(ge_ret, "SuperKernelLaunch: SuperKernelFactory init failed");
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory init fail, ret:0x%X", ge_ret);
GELOGE(ge_ret, "[Init][SuperKernelFactory] failed, ret:0x%X", ge_ret);
return ge_ret;
}
// Call the fuse API
std::unique_ptr<skt::SuperKernel> superKernel = nullptr;
ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info.last_block_dim, superKernel);
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory FuseKernels fail, ret:0x%X",
ge_ret);
GELOGE(ge_ret, "SuperKernelLaunch: fuse call failed");
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory FuseKernels fail, ret:0x%X", ge_ret);
GELOGE(ge_ret, "[Call][FuseKernels] failed, ret:0x%X", ge_ret);
return ge_ret;
}
// Launch a super kernel
skt_dump_flag_ = GetDumpFlag();
ge_ret = superKernel->Launch(skt_info.last_stream, skt_dump_flag_);
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory Launch fail, ret:0x%X",
ge_ret);
GELOGE(ge_ret, "SuperKernelLaunch: launch failed");
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory Launch fail, ret:0x%X", ge_ret);
GELOGE(ge_ret, "[Call][Launch] failed, ret:0x%X", ge_ret);
return ge_ret;
}
GELOGI("SuperKernelLaunch: success[skt_kernel_list size[%zu] skt_arg_list[%zu]]", skt_kernel_list.size(),
@@ -300,7 +297,7 @@ Status KernelTaskInfo::SuperKernelLaunch() {
// record skt addr for release
superkernel_dev_nav_table_ = superKernel->GetNavTablePtr();
superkernel_device_args_addr_ = superKernel->GetDeviceArgsPtr();
GE_CHK_STATUS_RET(SKTFinalize(), "Skt finalize failed");
GE_CHK_STATUS_RET(SKTFinalize(), "[Call][SKTFinalize] failed");
return SUCCESS;
}

@@ -331,14 +328,13 @@ Status KernelTaskInfo::SaveSuperKernelInfo() {
bool KernelTaskInfo::IsMarkedLastNode() {
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return false;
}
OpDescPtr op_desc = davinci_model_->GetOpByIndex(ctx_.opIndex);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
ctx_.opIndex);
GELOGE(INTERNAL_ERROR, "InitTVMTaskInfo error, index is out of range!");
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", ctx_.opIndex);
GELOGE(INTERNAL_ERROR, "[Get][Op] by index failed, index:%u is out of range!", ctx_.opIndex);
return false;
}
bool is_last_node = false;
@@ -349,14 +345,13 @@ bool KernelTaskInfo::IsMarkedLastNode() {
bool KernelTaskInfo::IsMarkedFirstNode() {
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return false;
}
OpDescPtr op_desc = davinci_model_->GetOpByIndex(ctx_.opIndex);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
ctx_.opIndex);
GELOGE(INTERNAL_ERROR, "InitTVMTaskInfo error, index is out of range!");
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", ctx_.opIndex);
GELOGE(INTERNAL_ERROR, "[Get][Op] by index failed, index:%u is out of range!", ctx_.opIndex);
return false;
}
bool is_first_node = false;
@@ -381,7 +376,7 @@ Status KernelTaskInfo::SuperKernelDistribute() {
if (FirstCallSKTLaunchCheck()) {
ret = SuperKernelLaunch();
if (ret != SUCCESS) {
GELOGE(FAILED, "Call SuperKernelLaunch failed!");
GELOGE(FAILED, "[Call][SuperKernelLaunch] failed, taskid:%u", task_id_);
return FAILED;
}
}
@@ -389,16 +384,15 @@ Status KernelTaskInfo::SuperKernelDistribute() {
// 1.launch before
ret = SuperKernelLaunch();
if (ret != SUCCESS) {
GELOGE(ret, "Call SuperKernelLaunch failed!");
GELOGE(ret, "[Call][SuperKernelLaunch] failed, taskid:%u", task_id_);
return ret;
}
// 2.launch current
rtError_t rt_ret = rtKernelLaunchWithFlag(stub_func_, block_dim_, args_, args_size_,
static_cast<rtSmDesc_t *>(sm_desc_), stream_, dump_flag_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelLaunchWithFlag] failed, ret:0x%X", rt_ret);
return rt_ret;
}
call_save_dump_ = true;
@@ -407,15 +401,14 @@ Status KernelTaskInfo::SuperKernelDistribute() {
} else {
ret = SaveSuperKernelInfo();
if (ret != SUCCESS) {
GELOGE(ret, "Call SuperKernelLaunch failed!");
GELOGE(ret, "[Call][SaveSuperKernelInfo] failed, taskid:%u", task_id_);
return ret;
}
}
return SUCCESS;
}

Status KernelTaskInfo::Distribute() {
GELOGD("KernelTaskInfo Distribute Start.");
void KernelTaskInfo::SetArgs() {
if (davinci_model_->IsKnownNode()) {
if (kernel_type_ == ccKernelType::TE) {
args_ = l2_buffer_on_ ? davinci_model_->GetCurrentHybridArgsAddr(hybrid_args_offset_)
@@ -425,12 +418,25 @@ Status KernelTaskInfo::Distribute() {
}
GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
}
}

Status KernelTaskInfo::Distribute() {
GELOGD("KernelTaskInfo Distribute Start.");
SetArgs();
rtError_t rt_ret = RT_ERROR_NONE;
char skt_enable_env[MMPA_MAX_PATH] = { 0x00 };
INT32 res = mmGetEnv("SKT_ENABLE", skt_enable_env, MMPA_MAX_PATH);
int64_t env_flag = (res == EN_OK) ? strtol(skt_enable_env, nullptr, kBaseInt) : kStrtolFail;
bool call_skt = ((env_flag != 0) || is_l1_fusion_enable_);
if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) {
if (topic_type_flag_ > 0) {
// Use the fifth and sixth bits of dump_flag_ indicate the value of topic_type.
// xxxxxxxx xxxxxxxx xxxxxxxx xx00xxxx: DEVICE_ONLY
// xxxxxxxx xxxxxxxx xxxxxxxx xx01xxxx: DEVICE_FIRST
// xxxxxxxx xxxxxxxx xxxxxxxx xx10xxxx: HOST_ONLY
// xxxxxxxx xxxxxxxx xxxxxxxx xx11xxxx: HOST_FIRST
dump_flag_ = dump_flag_ | topic_type_flag_;
}
GELOGI("distribute task info kernel_type %d, flag %d", kernel_type_, dump_flag_);
// blockDim is reserved parameter, set to 1
rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(so_name_.c_str()),
@@ -461,7 +467,7 @@ Status KernelTaskInfo::Distribute() {
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag or rtCpuKernelLaunchWithFlag failed, "
"ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
// set for task_id_
@@ -497,18 +503,16 @@ Status KernelTaskInfo::CopyNoncontinuousArgs(uint16_t offset) {
// copy io addr
errno_t sec_ret = memcpy_s(args_addr.get() + offset, addr_size, io_addrs.data(), addr_size);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X",
addr_size, sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X", addr_size, sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%zu, ret:%d", addr_size, sec_ret);
return FAILED;
}

// copy args to device
rtError_t rt_ret = rtMemcpy(args_, args_size_, args_addr.get(), args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("Copy noncontinuous args success, kernel type %d.", kernel_type_);
@@ -558,7 +562,7 @@ Status KernelTaskInfo::Release() {
ret = (sm_desc_ != nullptr) ? rtMemFreeManaged(sm_desc_) : RT_ERROR_NONE;
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemFreeManaged failed, ret:0x%X", ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", static_cast<int>(ret));
GELOGE(RT_FAILED, "[Call][RtMemFreeManaged] failed, ret:0x%X", static_cast<int>(ret));
return RT_ERROR_TO_GE_STATUS(ret);
}
sm_desc_ = nullptr;
@@ -588,17 +592,15 @@ Status KernelTaskInfo::UpdateL2Data(const domi::KernelDef &kernel_def) {

rtError_t rt_ret = rtMemAllocManaged(&sm_desc_, sm_desc.size(), RT_MEMORY_SPM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemAllocManaged] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtMemcpy(sm_desc_, sm_desc.size(), sm_desc.data(), sm_desc.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
sm_desc.size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", sm_desc.size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", sm_desc.size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -644,9 +646,8 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[args_size_]);
errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X",
args_size_, sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X", args_size_, sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%u, ret:0x%X", args_size_, sec_ret);
return FAILED;
}

@@ -688,26 +689,24 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
// malloc args memory
rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", args_size_, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

// copy orign args
rt_ret = rtMemcpy(args_, args_size_, kernel_def.args().data(), args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

if ((args_size_ <= offset) || (args_size_ - offset < kAddrLen * tensor_device_addrs.size())) {
REPORT_INNER_ERROR("E19999", "offset:%u >= kernelInfo.argsSize:%u or copy content:%zu beyond applied memory:%u, "
"check invalid",
offset, args_size_, kAddrLen * tensor_device_addrs.size(), args_size_ - offset);
GELOGE(FAILED, "offset >= kernelInfo.argsSize or copy content beyond applied memory.");
"check invalid", offset, args_size_, kAddrLen * tensor_device_addrs.size(), args_size_ - offset);
GELOGE(FAILED, "[Check][Param] offset:%u >= kernelInfo.argsSize:%u or copy content:%zu beyond applied memory:%u, "
"check invalid", offset, args_size_, kAddrLen * tensor_device_addrs.size(), args_size_ - offset);
return FAILED;
}

@@ -715,25 +714,20 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
rt_ret = rtMemcpy(static_cast<char *>(args_) + offset, args_size_ - offset, tensor_device_addrs.data(),
kAddrLen * tensor_device_addrs.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
args_size_ - offset, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", args_size_ - offset, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", args_size_ - offset, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
sec_ret = memcpy_s(args_addr.get() + offset, args_size_ - offset, tensor_device_addrs.data(),
kAddrLen * tensor_device_addrs.size());
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s failed, size:%u, ret:0x%X",
args_size_ - offset, sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s failed, size:%u, ret:0x%X", args_size_ - offset, sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%u, ret:0x%X", args_size_ - offset, sec_ret);
return FAILED;
}
skt_dump_args_ = static_cast<char *>(args_) + offset;
InitDumpTask(offset);

GE_CHK_BOOL_TRUE_EXEC_INFO(davinci_model_->GetOpDugReg(), dump_args_ = static_cast<char *>(args_) + offset,
"Op debug is open in TVM task info");

vector<void *> virtual_io_addrs; // use virtual address for zero copy key.
virtual_io_addrs.insert(virtual_io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
virtual_io_addrs.insert(virtual_io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
@@ -769,9 +763,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
GELOGI("Do InitAICPUCustomTask");
OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
op_index);
GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", op_index);
GELOGE(INTERNAL_ERROR, "[Get][Op] index is out of range, index:%u", op_index);
return INTERNAL_ERROR;
}

@@ -783,16 +776,18 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (ctx_.argsOffset == nullptr) {
REPORT_CALL_ERROR("E19999", "New ctx_.argsOffset fail, size:%u, op:%s(%s)",
kCustomAicpuArgsLen, op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "ctx_.argsOffset is null!");
GELOGE(PARAM_INVALID, "[Malloc][Memory] ctx_.argsOffset is null, size:%u, op:%s(%s)",
kCustomAicpuArgsLen, op_desc->GetName().c_str(), op_desc->GetType().c_str());
return PARAM_INVALID;
}

if (context.args_offset().size() / sizeof(uint16_t) < kCustomAicpuArgsLen) {
REPORT_INNER_ERROR("E19999", "context.args_offset().size():%zu / sizeof(uint16_t) is less than "
"kCustomAicpuArgsLen:%u, op:%s(%s), check invalid",
context.args_offset().size(), kCustomAicpuArgsLen,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "context.args_offset().size() / sizeof(uint16_t) is less than kCustomAicpuArgsLen");
"kCustomAicpuArgsLen:%u, op:%s(%s), check invalid", context.args_offset().size(),
kCustomAicpuArgsLen, op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param] context.args_offset().size():%zu / sizeof(uint16_t) is less than "
"kCustomAicpuArgsLen:%u, op:%s(%s)", context.args_offset().size(), kCustomAicpuArgsLen,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return PARAM_INVALID;
}

@@ -805,7 +800,7 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
Status ret = StoreInputOutputTensor(input_data_addrs, output_data_addrs, ModelUtils::GetInputDescs(op_desc),
ModelUtils::GetOutputDescs(op_desc));
if (ret != SUCCESS) {
GELOGE(ret, "StoreInputOutputTensor Failed");
GELOGE(ret, "[Store][InputOutputTensor] Failed, op:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
return ret;
}

@@ -814,7 +809,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (!AttrUtils::GetBytes(op_desc, ATTR_NAME_OPATTR, buffer)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_OPATTR.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "can't find opattr bytes!.");
GELOGE(FAILED, "[Get][Attr] %s in op:%s(%s) fail", ATTR_NAME_OPATTR.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

@@ -822,7 +818,7 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (op_attr_size == 0) {
REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s) size is 0, check invalid",
ATTR_NAME_OPATTR.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "param op_attr_size is out of range");
GELOGE(PARAM_INVALID, "[Check][Param] param op_attr_size is out of range, op:%s", op_desc->GetName().c_str());
return PARAM_INVALID;
}

@@ -830,7 +826,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -838,7 +835,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -851,7 +849,7 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
"op:%s(%s) check invalid", i, (uint32_t)ctx_.argsOffset[i],
sizeof(uint64_t), kernel_def.args().size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "ctx.argsOffset[%u]: %u + sizeof(uint64_t): %zu >= kernelDef.args().size():%zu", i,
GELOGE(FAILED, "[Check][Param] ctx.argsOffset[%u]:%u + sizeof(uint64_t):%zu >= kernelDef.args().size():%zu", i,
(uint32_t)ctx_.argsOffset[i], sizeof(uint64_t), kernel_def.args().size());
return FAILED;
}
@@ -871,7 +869,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -879,9 +878,9 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
op_desc->GetName().c_str(), op_desc->GetType().c_str(), kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), kernel_def.args_size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -896,12 +895,12 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
GELOGI("Do InitCCETask");
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}
Status ret = SetContext(kernel_def);
if (ret != SUCCESS) {
GELOGE(ret, "SetContext Fail.");
GELOGE(ret, "[Set][Context] Fail.");
return ret;
}

@@ -911,7 +910,7 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
if (context.is_flowtable()) {
if (flowtable.empty()) {
REPORT_INNER_ERROR("E19999", "kernel_def.flowtable is empty, check invalid");
GELOGE(FAILED, "flowtable is null.");
GELOGE(FAILED, "[Check][Param] flowtable is null.");
return FAILED;
}
}
@@ -931,23 +930,22 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {

ret = UpdateCceArgs(sm_desc, flowtable, kernel_def);
if (ret != SUCCESS) {
GELOGE(ret, "update cce args fail");
GELOGE(ret, "[Update][CceArgs] fail");
return ret;
}

// flowtable
ret = SetFlowtable(flowtable, kernel_def);
if (ret != SUCCESS) {
GELOGE(ret, "SetFlowtable Fail");
GELOGE(ret, "[Set][Flowtable] Fail");
return ret;
}

// args
rtError_t rt_ret = rtMalloc(&args_, kernel_def.args_size(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X",
kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret:0x%X", kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%u, ret:0x%X", kernel_def.args_size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "cce task physical memory.", kernel_def.args_size())
@@ -955,9 +953,8 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
rt_ret = rtMemcpy(args_, kernel_def.args_size(), kernel_def.args().data(), kernel_def.args_size(),
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X",
kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%u, ret:0x%X", kernel_def.args_size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%u, ret:0x%X", kernel_def.args_size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -965,17 +962,15 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
if (!sm_desc.empty()) {
rt_ret = rtMemAllocManaged(&sm_desc_, sm_desc.size(), RT_MEMORY_SPM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemAllocManaged] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtMemcpy(sm_desc_, sm_desc.size(), sm_desc.data(), sm_desc.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
sm_desc.size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", sm_desc.size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", sm_desc.size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -989,9 +984,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k

OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
op_index);
GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", op_index);
GELOGE(INTERNAL_ERROR, "[Get][Op] index is out of range, index:%u", op_index);
return INTERNAL_ERROR;
}
GELOGI("node[%s] test so name %s, kernel name %s", op_desc->GetName().c_str(), so_name_.c_str(),
@@ -1000,7 +994,7 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
if (kernel_type_ == ccKernelType::CUST_AI_CPU) {
bool loaded = false;
GE_CHK_STATUS_RET(ModelManager::GetInstance()->LoadCustAicpuSo(op_desc, so_name_, loaded),
"launch cust aicpu so failed");
"[Launch][CustAicpuSo] failed");
}

// copy args to new host memory
@@ -1008,9 +1002,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
GE_PRINT_DYNAMIC_MEMORY(new, "cce task physical memory.", sizeof(uint8_t) * args_size_)
errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X",
args_size_, sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X", args_size_, sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%u, ret:0x%X", args_size_, sec_ret);
return FAILED;
}

@@ -1018,7 +1011,7 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
const auto &ext_info = kernel_def.kernel_ext_info();
auto init_ret = InitAicpuTaskExtInfo(ext_info);
if (init_ret != SUCCESS) {
GELOGE(init_ret, "Init aicpu task ext info failed, ext_info size=%zu", ext_info.size());
GELOGE(init_ret, "[Init][AicpuTaskExtInfo] failed, ext_info size=%zu", ext_info.size());
return init_ret;
}
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc->GetName().c_str(),
@@ -1044,9 +1037,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
auto addrs_size = sizeof(uint64_t) * io_addrs.size();
sec_ret = memcpy_s(reinterpret_cast<void *>(io_addr), addrs_size, io_addrs.data(), addrs_size);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%lu, ret:0x%X",
addrs_size, sec_ret);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%lu, ret:0x%X", addrs_size, sec_ret);
GELOGE(FAILED, "[Call][Memcpy] failed, size:%lu, ret:0x%X", addrs_size, sec_ret);
return FAILED;
}
}
@@ -1056,7 +1048,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api(rtMalloc) failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "cce task physical memory.", args_size_)
@@ -1066,14 +1059,12 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
InitDumpTask(sizeof(aicpu::AicpuParamHead));
if (davinci_model_->GetOpDugReg()) {
GELOGI("Op debug is open in aicpu task info");
dump_args_ = static_cast<char *>(args_) + sizeof(aicpu::AicpuParamHead);
}

if (kernel_type_ == ccKernelType::CUST_AI_CPU) {
dump_flag_ |= RT_KERNEL_CUSTOM_AICPU;
}
@@ -1085,6 +1076,7 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k

void KernelTaskInfo::InitDumpTask(uint32_t offset) {
if (davinci_model_->OpNeedDump(op_desc_->GetName())) {
GELOGD("Op %s need dump in task info", op_desc_->GetName().c_str());
if (IsL1FusionOp(op_desc_)) {
dump_flag_ = RT_FUSION_KERNEL_DUMPFLAG;
} else {
@@ -1092,6 +1084,10 @@ void KernelTaskInfo::InitDumpTask(uint32_t offset) {
}
dump_args_ = static_cast<char *>(args_) + offset;
}
if (davinci_model_->GetOpDugReg()) {
GELOGD("Op debug is open in kernel task info");
dump_args_ = static_cast<char *>(args_) + offset;
}
}

Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
@@ -1109,14 +1105,17 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
num_inputs,
num_outputs,
unknown_type));
GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!");
GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "[Malloc][Memory] for aicpu_ext_handle failed!");
GE_CHK_STATUS_RET(ext_handle->Parse(ext_info),
"Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size());
"[Parse][KernelExtInfo] failed, kernel_ext_info_size=%zu, op:%s.",
ext_info.size(), op_desc_->GetName().c_str());
GE_CHK_STATUS_RET(ext_handle->UpdateSessionInfoSessionId(davinci_model_->GetSessionId()),
"Update session info session id failed.");
"[Update][SessionInfoSessionId] failed, op:%s", op_desc_->GetName().c_str());
GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId());
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true),
"[Update][ExecuteMode] failed, op:%s", op_desc_->GetName().c_str());
GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");
topic_type_flag_ = ext_handle->GetTopicTypeFlag();

bool all_shape = false;
(void)AttrUtils::GetBool(op_desc_, kAicpuAllshape, all_shape);
@@ -1126,13 +1125,15 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
auto input_desc = op_desc_->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateInputShapeAndType(i, *input_desc),
"Input[%u] update input shape failed.", i);
"[Call][UpdateInputShapeAndType] Input[%u] update input shape failed, op:%s.",
i, op_desc_->GetName().c_str());
}
for (uint32_t j = 0; j < num_outputs; j++) {
auto output_desc = op_desc_->MutableOutputDesc(j);
GE_CHECK_NOTNULL(output_desc);
GE_CHK_STATUS_RET(ext_handle->UpdateOutputShapeAndType(j, *output_desc),
"Output[%u] update output shape failed.", j);
"[Call][UpdateOutputShapeAndType] Output[%u] update output shape failed, op:%s.",
j, op_desc_->GetName().c_str());
}
}
auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
@@ -1140,7 +1141,8 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
ext_handle->GetExtInfoLen(), rt_ret);
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), ext_handle->GetExtInfoLen(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
@@ -1149,7 +1151,8 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
ext_handle->GetExtInfoLen(), rt_ret);
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), ext_handle->GetExtInfoLen(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -1166,9 +1169,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// inputDescs
rtError_t rt_ret = rtMalloc(&custom_info_.input_descs, sizeof(opTensor_t) * input_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
sizeof(opTensor_t) * input_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * input_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * input_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -1176,9 +1178,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(static_cast<opTensor_t *>(custom_info_.input_descs) + i, sizeof(opTensor_t),
const_cast<tagOpTensor *>(&input_descs[i]), sizeof(opTensor_t), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
sizeof(opTensor_t), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", sizeof(opTensor_t), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", sizeof(opTensor_t), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -1186,9 +1187,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// inputAddrs
rt_ret = rtMalloc(&custom_info_.input_addrs, sizeof(opTensor_t) * input_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
sizeof(opTensor_t) * input_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * input_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * input_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -1196,9 +1196,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(custom_info_.input_addrs, kAddrLen * input_size, &input_data_addrs[0], kAddrLen * input_size,
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
kAddrLen * input_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", kAddrLen * input_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", kAddrLen * input_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -1206,18 +1205,16 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// outputDescs
rt_ret = rtMalloc(&custom_info_.output_descs, sizeof(opTensor_t) * output_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
sizeof(opTensor_t) * output_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * output_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * output_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
for (std::size_t i = 0; i < output_size; ++i) {
rt_ret = rtMemcpy(static_cast<opTensor_t *>(custom_info_.output_descs) + i, sizeof(opTensor_t),
const_cast<tagOpTensor *>(&input_descs[i]), sizeof(opTensor_t), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
sizeof(opTensor_t), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", sizeof(opTensor_t), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", sizeof(opTensor_t), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -1225,9 +1222,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// outputAddrs
rt_ret = rtMalloc(&custom_info_.output_addrs, sizeof(opTensor_t) * output_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
sizeof(opTensor_t) * output_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * output_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", sizeof(opTensor_t) * output_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -1235,9 +1231,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(custom_info_.output_addrs, kAddrLen * output_size, &output_data_addrs[0], kAddrLen * output_size,
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
kAddrLen * output_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", kAddrLen * output_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", kAddrLen * output_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -1254,7 +1249,7 @@ Status KernelTaskInfo::SetContext(const domi::KernelDef &kernel_def) {
ctx_.argsCount = context.args_count();
if (ctx_.argsCount == 0) {
REPORT_INNER_ERROR("E19999", "kernel_def.context.args_count is 0, check invalid");
GELOGE(INTERNAL_ERROR, "check argsCount fail:%u.", ctx_.argsCount);
GELOGE(INTERNAL_ERROR, "[Check][Param] argsCount is %u.", ctx_.argsCount);
return INTERNAL_ERROR;
}

@@ -1262,16 +1257,16 @@ Status KernelTaskInfo::SetContext(const domi::KernelDef &kernel_def) {
REPORT_INNER_ERROR("E19999", "param [context.args_offset().size():%zu / sizeof(uint16_t)] "
"is less than [ctx_.argsCount:%u], check invalid",
context.args_offset().size(), ctx_.argsCount);
GELOGE(PARAM_INVALID, "param [context.args_offset().size() / sizeof(uint16_t)] is less than [ctx_.argsCount]");
GELOGE(PARAM_INVALID, "[Check][Param] [context.args_offset().size():%zu / sizeof(uint16_t)] "
"is less than [ctx_.argsCount:%u], check invalid", context.args_offset().size(), ctx_.argsCount);
return PARAM_INVALID;
}

// ctx_.argsOffset stores the offset of the internal information of agrs_, equal to the ctx_.argsCount
ctx_.argsOffset = new (std::nothrow) uint16_t[ctx_.argsCount]();
if (ctx_.argsOffset == nullptr) {
REPORT_CALL_ERROR("E19999", "New ctx_.argsOffset fail, size:%u",
ctx_.argsCount);
GELOGE(PARAM_INVALID, "(param [ctx_.argsOffset] must not be null.");
REPORT_CALL_ERROR("E19999", "New ctx_.argsOffset fail, size:%u", ctx_.argsCount);
GELOGE(PARAM_INVALID, "[Malloc][Memory] failed, ctx_.argsOffset must not be null, size:%u", ctx_.argsCount);
return PARAM_INVALID;
}

@@ -1289,7 +1284,7 @@ void KernelTaskInfo::FreeRtMem(void **ptr) {
rtError_t ret = rtFree(*ptr);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFree failed, ret:0x%X", ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", ret);
GELOGE(RT_FAILED, "[Call][RtFree] failed, ret:0x%X", ret);
}

*ptr = nullptr;
@@ -1307,7 +1302,7 @@ Status KernelTaskInfo::UpdateCceArgs(std::string &sm_desc, std::string &flowtabl
Status status =
CceUpdateKernelArgs(context, data_base_addr, weight_base_addr, var_base_addr, sm_desc, flowtable, kernel_def);
if (status != SUCCESS) {
GELOGE(status, "Call cce api failed");
GELOGE(status, "[Call][CceUpdateKernelArgs] failed, ret:%d", status);
return status;
}
return SUCCESS;
@@ -1336,9 +1331,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
if (handle == nullptr) {
error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
REPORT_INNER_ERROR("E19999", "Failed in dlopen:%s, dlerror:%s",
canonicalPath.c_str(), error);
GELOGE(GE_PLGMGR_SO_NOT_EXIST, "Failed in dlopen %s! ", error);
REPORT_INNER_ERROR("E19999", "Failed in dlopen:%s, dlerror:%s", canonicalPath.c_str(), error);
GELOGE(GE_PLGMGR_SO_NOT_EXIST, "[Open][File] %s failed, reason:%s! ", canonicalPath.c_str(), error);
return FAILED;
}
ccStatus_t cc_ret;
@@ -1348,7 +1342,7 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
if (cceUpdateKernelArgs == nullptr) {
REPORT_INNER_ERROR("E19999", "No symbol:%s in %s, check invalid",
update_kernel_args.c_str(), canonicalPath.c_str());
GELOGE(FAILED, "Failed to invoke function ccUpdateKernelArgs");
GELOGE(FAILED, "[Invoke][Function] ccUpdateKernelArgs failed.");
if (mmDlclose(handle) != 0) {
error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
@@ -1372,9 +1366,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
return FAILED;
}
if (cc_ret != CC_STATUS_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call cceUpdateKernelArgs fail, ret:0x%X",
cc_ret);
GELOGE(CCE_FAILED, "Call cce api failed, ret: 0x%X", cc_ret);
REPORT_CALL_ERROR("E19999", "Call cceUpdateKernelArgs fail, ret:0x%X", cc_ret);
GELOGE(CCE_FAILED, "[Call][CceUpdateKernelArgs] failed, ret:0x%X", cc_ret);
return CCE_FAILED;
}

@@ -1387,18 +1380,16 @@ Status KernelTaskInfo::SetFlowtable(std::string &flowtable, const domi::KernelDe
if (context.is_flowtable()) {
rtError_t rt_ret = rtMalloc(&flowtable_, flowtable.size(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X",
flowtable.size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", flowtable.size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%zu, ret:0x%X", flowtable.size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "flowtable refresh of cce scence.", flowtable.size())

rt_ret = rtMemcpy(flowtable_, flowtable.size(), flowtable.data(), flowtable.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X",
flowtable.size(), rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret:0x%X", flowtable.size(), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%zu, ret:0x%X", flowtable.size(), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -1412,7 +1403,8 @@ Status KernelTaskInfo::SetFlowtable(std::string &flowtable, const domi::KernelDe
"kernelDef.args().size():%zu, check invalid",
(uint32_t)((reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data())))[0]),
sizeof(uint64_t), kernel_def.args().size());
GELOGE(FAILED, "(context.args_offset().data()))[0]:%u + sizeof(uint64_t):%zu > kernelDef.args().size():%zu",
GELOGE(FAILED, "[Check][Param] (context.args_offset().data()))[0]:%u + sizeof(uint64_t):%zu > "
"kernelDef.args().size():%zu",
(uint32_t)((reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data())))[0]),
sizeof(uint64_t), kernel_def.args().size());
return FAILED;
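
Note on the SKT gate added at the top of KernelTaskInfo::Distribute() above: the super-kernel path is taken when either the SKT_ENABLE environment variable parses to a non-zero value or is_l1_fusion_enable_ is set. The sketch below is illustrative only and not part of this patch; the concrete values of kBaseInt and kStrtolFail are assumptions (base-10 parsing and a non-zero "unset" sentinel), since those constants are defined elsewhere in the file.

#include <cstdint>
#include <cstdlib>

// Illustrative sketch (not part of this change): the call_skt predicate from Distribute(),
// reduced to standard-library calls. The kBaseInt and kStrtolFail values are assumptions here.
constexpr int kBaseInt = 10;         // assumed: numeric base passed to strtol above
constexpr int64_t kStrtolFail = -1;  // assumed: sentinel used when SKT_ENABLE is not set

bool CallSuperKernel(bool is_l1_fusion_enable) {
  const char *env = std::getenv("SKT_ENABLE");  // stand-in for mmGetEnv in the patch
  const int64_t env_flag = (env != nullptr) ? std::strtol(env, nullptr, kBaseInt) : kStrtolFail;
  return (env_flag != 0) || is_l1_fusion_enable;  // same decision as call_skt above
}

Under this predicate, exporting SKT_ENABLE=0 disables the super-kernel launch path unless L1 fusion is enabled.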


+2 -0  ge/graph/load/model_manager/task_info/kernel_task_info.h

@@ -145,6 +145,7 @@ class KernelTaskInfo : public TaskInfo {
bool IsMarkedFirstNode();
bool FirstCallSKTLaunchCheck();
bool DoubleCallSKTSaveCheck();
void SetArgs();

void *stub_func_;
void *args_;
@@ -169,6 +170,7 @@ class KernelTaskInfo : public TaskInfo {
uint16_t io_addr_offset_ = 0;
bool l2_buffer_on_ = false;
bool call_save_dump_ = false;
int32_t topic_type_flag_ = -1;

// aicpu ext_info device mem
void *aicpu_ext_info_addr_ = nullptr;
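
The new topic_type_flag_ member pairs with the comment block added in Distribute(): the fifth and sixth bits of dump_flag_ carry the topic type (DEVICE_ONLY, DEVICE_FIRST, HOST_ONLY, HOST_FIRST). A hedged sketch of that encoding follows; it is illustrative only, and the enum values and shift are assumptions chosen to reproduce the xx00/xx01/xx10/xx11 patterns from the comment, not the GE definitions.

#include <cstdint>

// Illustrative sketch (not part of this change): one way to place the topic type in
// bits 4-5 of dump_flag_, matching the bit patterns documented in Distribute().
enum class TopicType : uint32_t { DEVICE_ONLY = 0, DEVICE_FIRST = 1, HOST_ONLY = 2, HOST_FIRST = 3 };

constexpr uint32_t kTopicTypeShift = 4U;  // assumed shift: the fifth and sixth bits of dump_flag_

constexpr uint32_t EncodeTopicType(uint32_t dump_flag, TopicType type) {
  return dump_flag | (static_cast<uint32_t>(type) << kTopicTypeShift);  // mirrors dump_flag_ |= topic_type_flag_
}

constexpr TopicType DecodeTopicType(uint32_t dump_flag) {
  return static_cast<TopicType>((dump_flag >> kTopicTypeShift) & 0x3U);
}

static_assert(EncodeTopicType(0U, TopicType::HOST_ONLY) == 0x20U, "xx10xxxx pattern from the comment");

In the patch itself, topic_type_flag_ already holds the value returned by GetTopicTypeFlag(), so the single OR in Distribute() appears to be the whole encoding step.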


+10 -9  ge/graph/load/model_manager/task_info/label_goto_ex_task_info.cc

@@ -40,7 +40,7 @@ Status LabelGotoExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
label_goto.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range!", label_goto.op_index());
GELOGE(INTERNAL_ERROR, "[Get][Op] Task op index:%u out of range!", label_goto.op_index());
return INTERNAL_ERROR;
}

@@ -49,8 +49,8 @@ Status LabelGotoExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_LABEL_SWITCH_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelGotoExTaskInfo: %s attr [%s] not exist.",
op_desc->GetName().c_str(), ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in op:%s(%s) fail.",
ATTR_NAME_LABEL_SWITCH_INDEX.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

@@ -63,7 +63,8 @@ Status LabelGotoExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), sizeof(uint64_t), rt_ret);
GELOGE(RT_FAILED, "Call rtMalloc failed, error: %#x", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), sizeof(uint64_t), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -72,7 +73,8 @@ Status LabelGotoExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), sizeof(uint64_t), rt_ret);
GELOGE(RT_FAILED, "Call rtMemcpy failed, error: %#x", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), sizeof(uint64_t), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -86,15 +88,14 @@ Status LabelGotoExTaskInfo::Distribute() {
GE_CHECK_NOTNULL(index_value_);
if (args_size_ == 0) {
REPORT_INNER_ERROR("E19999", "Param args_size_ is 0, check fail");
GELOGE(PARAM_INVALID, "branch max: %u, args size: %u invalid.", kGotoBranchMax, args_size_);
GELOGE(PARAM_INVALID, "[Check][Param] branch max:%u, args size:%u invalid.", kGotoBranchMax, args_size_);
return PARAM_INVALID;
}

rtError_t rt_ret = rtLabelSwitchByIndex(index_value_, kGotoBranchMax, args_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtLabelSwitchByIndex failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtLabelSwitchByIndex failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtLabelSwitchByIndex] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+7 -8  ge/graph/load/model_manager/task_info/label_set_task_info.cc

@@ -32,9 +32,8 @@ Status LabelSetTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
const domi::LabelSetDef &label_set = task_def.label_set();
OpDescPtr op_desc = davinci_model->GetOpByIndex(label_set.op_index());
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
label_set.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range!", label_set.op_index());
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", label_set.op_index());
GELOGE(INTERNAL_ERROR, "[Get][Op] Task op index:%u out of range!", label_set.op_index());
return INTERNAL_ERROR;
}

@@ -43,7 +42,7 @@ Status LabelSetTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_LABEL_SWITCH_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSetTaskInfo: %s attr [%s] not exist.",
GELOGE(INTERNAL_ERROR, "[Get][Attr] LabelSetTaskInfo:%s attr [%s] not exist.",
op_desc->GetName().c_str(), ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
return INTERNAL_ERROR;
}
@@ -53,7 +52,8 @@ Status LabelSetTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
REPORT_INNER_ERROR("E19999", "lable_index:%u >= label_list.size():%zu in model, op:%s(%s), "
"check invalid", label_index, label_list.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSetTaskInfo: Invalid label id:%u, label size:%zu", label_index, label_list.size());
GELOGE(INTERNAL_ERROR, "[Check][Param] LabelSetTaskInfo: Invalid label id:%u, label size:%zu, op:%s(%s)",
label_index, label_list.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}
label_ = label_list[label_index];
@@ -66,9 +66,8 @@ Status LabelSetTaskInfo::Distribute() {
GELOGI("LabelSetTaskInfo Distribute Start.");
rtError_t rt_ret = rtLabelSet(label_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtLabelSet failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtLabelSet failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtLabelSet] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+18 -21  ge/graph/load/model_manager/task_info/label_switch_by_index_task_info.cc

@@ -39,9 +39,8 @@ Status LabelSwitchByIndexTaskInfo::Init(const domi::TaskDef &task_def, DavinciMo
const domi::LabelSwitchByIndexDef &label_switch = task_def.label_switch_by_index();
OpDescPtr op_desc = davinci_model->GetOpByIndex(label_switch.op_index());
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
label_switch.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range!", label_switch.op_index());
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", label_switch.op_index());
GELOGE(INTERNAL_ERROR, "[Get][Op] Task op index:%u out of range!", label_switch.op_index());
return INTERNAL_ERROR;
}

@@ -52,7 +51,7 @@ Status LabelSwitchByIndexTaskInfo::Init(const domi::TaskDef &task_def, DavinciMo
REPORT_INNER_ERROR("E19999", "input_data_addr size:%zu != kLabelSwitchIndexNum:%u, op:%s(%s), "
"check invalid", input_data_addr.size(), kLabelSwitchIndexNum,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndexTaskInfo: %s invalid addr size: %zu, num: %u!",
GELOGE(INTERNAL_ERROR, "[Check][Param] %s invalid addr size:%zu, num:%u!",
op_desc->GetName().c_str(), input_data_addr.size(), kLabelSwitchIndexNum);
return INTERNAL_ERROR;
}
@@ -70,17 +69,16 @@ Status LabelSwitchByIndexTaskInfo::Init(const domi::TaskDef &task_def, DavinciMo
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndexTaskInfo: %s Get attr %s failed.", op_desc->GetName().c_str(),
ATTR_NAME_LABEL_SWITCH_LIST.c_str());
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in op:%s(%s) failed.", ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

if (label_idx_list.empty() || label_idx_list.size() != branch_max_) {
REPORT_INNER_ERROR("E19999", "label_idx_list in op:%s(%s) is empty, or size:%zu != branch_max_:%u"
"check invalid",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
"check invalid", op_desc->GetName().c_str(), op_desc->GetType().c_str(),
label_idx_list.size(), branch_max_);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndexTaskInfo: %s label index size: %zu, task branch max: %u.",
GELOGE(INTERNAL_ERROR, "[Check][Param] %s label index size:%zu, task branch max:%u.",
op_desc->GetName().c_str(), label_idx_list.size(), branch_max_);
return INTERNAL_ERROR;
}
@@ -93,7 +91,7 @@ Status LabelSwitchByIndexTaskInfo::Init(const domi::TaskDef &task_def, DavinciMo
REPORT_INNER_ERROR("E19999", "label_id:%u in op:%s(%s) >= label_list.size():%zu in model"
"check invalid", label_id,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), label_list.size());
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndexTaskInfo: %s index: %zu, label index: %u, model label size: %zu.",
GELOGE(INTERNAL_ERROR, "[Check][Param] %s index:%zu, label index:%u, model label size:%zu.",
op_desc->GetName().c_str(), idx, label_id, label_list.size());
return INTERNAL_ERROR;
}
@@ -108,15 +106,15 @@ Status LabelSwitchByIndexTaskInfo::Init(const domi::TaskDef &task_def, DavinciMo
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%u, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtLabelListCpy(label_used.data(), label_used.size(), args_, args_size_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtLabelListCpy failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtLabelListCpy failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtLabelListCpy] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -129,17 +127,15 @@ Status LabelSwitchByIndexTaskInfo::Distribute() {
GE_CHECK_NOTNULL(args_);
GE_CHECK_NOTNULL(index_value_);
if (branch_max_ == 0 || args_size_ == 0) {
REPORT_INNER_ERROR("E19999", "branch_max_:%u or args_size_:%u is 0"
"check invalid", branch_max_, args_size_);
GELOGE(PARAM_INVALID, "branch max: %u, args size: %u invalid.", branch_max_, args_size_);
REPORT_INNER_ERROR("E19999", "branch_max_:%u or args_size_:%u is 0, check invalid", branch_max_, args_size_);
GELOGE(PARAM_INVALID, "[Check][Param] branch max:%u, args size:%u invalid.", branch_max_, args_size_);
return PARAM_INVALID;
}

rtError_t rt_ret = rtLabelSwitchByIndex(index_value_, branch_max_, args_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtLabelSwitchByIndex failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtLabelSwitchByIndex failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtLabelSwitchByIndex] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -159,7 +155,8 @@ Status LabelSwitchByIndexTaskInfo::CalculateArgs(const domi::TaskDef &task_def,
REPORT_INNER_ERROR("E19999", "input size:%zu in op:%s(%s) != kLabelSwitchIndexNum"
"check invalid", op_desc->GetInputsSize(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Label switch op only have one data input. Now input size is %zu", op_desc->GetInputsSize());
GELOGE(FAILED, "[Check][Param] Label switch op:%s(%s) only have one data input. Now input size is %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_desc->GetInputsSize());
return FAILED;
}
string input_tensor_name = op_desc->GetName();


+8 -8  ge/graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc

@@ -36,9 +36,8 @@ Status MemcpyAddrAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel
const auto &memcpy_async = task_def.memcpy_async();
OpDescPtr op_desc = davinci_model->GetOpByIndex(memcpy_async.op_index());
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
memcpy_async.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", memcpy_async.op_index());
GELOGE(INTERNAL_ERROR, "[Get][Op] Task op index:%u out of range", memcpy_async.op_index());
return INTERNAL_ERROR;
}

@@ -66,7 +65,8 @@ Status MemcpyAddrAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
args_size + kAlignBytes, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed for op:%s(%s), size:%lu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size + kAlignBytes, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -78,7 +78,8 @@ Status MemcpyAddrAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size, rt_ret);
GELOGE(RT_FAILED, "Call rt api for src failed, ret: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] for src failed for op:%s(%s), size:%zu, ret:0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -98,9 +99,8 @@ Status MemcpyAddrAsyncTaskInfo::Distribute() {
rtError_t rt_ret = rtMemcpyAsync(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(args_align_) + sizeof(void *)),
dst_max_, args_align_, count_, static_cast<rtMemcpyKind_t>(kind_), stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpyAsync failed, size:%lu, ret:0x%X",
dst_max_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpyAsync failed, size:%lu, ret:0x%X", dst_max_, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpyAsync] failed, size:%lu, ret:0x%X", dst_max_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+6 -8  ge/graph/load/model_manager/task_info/memcpy_async_task_info.cc

@@ -36,9 +36,8 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
dst_max_ = memcpy_async.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
memcpy_async.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", memcpy_async.op_index());
GELOGE(INTERNAL_ERROR, "[Get][Op] Task op index:%u out of range", memcpy_async.op_index());
return INTERNAL_ERROR;
}

@@ -47,7 +46,7 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
dst_ = reinterpret_cast<uint8_t *>(reinterpret_cast<uintptr_t>(src_) + sizeof(void *));
// for zero copy
kind_ = RT_MEMCPY_ADDR_DEVICE_TO_DEVICE;
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "[Set][Addrs] failed, op:%s", op_desc->GetName().c_str());
GELOGI("MemcpyAsyncTaskInfo op name %s, src_ %p, dst_ %p, args_offset %u.",
op_desc->GetName().c_str(), src_, dst_, args_offset_);
return SUCCESS;
@@ -77,7 +76,7 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da

davinci_model_->DisableZeroCopy(src_);
davinci_model_->DisableZeroCopy(dst_);
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "[Set][Addrs] failed, op:%s", op_desc->GetName().c_str());
GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
memcpy_async.src(), memcpy_async.dst(), src_, dst_, dst_max_, count_);
return SUCCESS;
@@ -88,9 +87,8 @@ Status MemcpyAsyncTaskInfo::Distribute() {

rtError_t rt_ret = rtMemcpyAsync(dst_, dst_max_, src_, count_, static_cast<rtMemcpyKind_t>(kind_), stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpyAsync failed, size:%lu, ret:0x%X",
dst_max_, rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpyAsync failed, size:%lu, ret:0x%X", dst_max_, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpyAsync] failed, size:%lu, ret:0x%X", dst_max_, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
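
For context on the zero-copy setup above: the args block carries two pointer-sized slots, and the destination slot sits exactly one pointer width after the source slot (dst_ = src_ + sizeof(void *)). A minimal standalone sketch of that arithmetic, with hypothetical names and illustrative values:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Two pointer-sized slots, mirroring the args block laid out in MemcpyAsyncTaskInfo::Init.
    void *slots[2] = {nullptr, nullptr};
    auto src = reinterpret_cast<uint8_t *>(slots);
    auto dst = reinterpret_cast<uint8_t *>(reinterpret_cast<uintptr_t>(src) + sizeof(void *));
    // dst lands one slot after src, the same offset used for dst_ above.
    printf("slot offset: %zu bytes\n", static_cast<size_t>(dst - src));
    return 0;
  }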



+4 -5  ge/graph/load/model_manager/task_info/model_exit_task_info.cc

@@ -25,13 +25,13 @@ Status ModelExitTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davi
GELOGI("InitModelExitTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

Status ret = SetStream(task_def.stream_id(), davinci_model->GetStreamList());
if (ret != SUCCESS) {
GELOGE(ret, "SetStream fail, stream_id:%u", task_def.stream_id());
GELOGE(ret, "[Set][Stream] fail, stream_id:%u", task_def.stream_id());
return ret;
}

@@ -44,9 +44,8 @@ Status ModelExitTaskInfo::Distribute() {
GELOGI("ModelExitTaskInfo Distribute Start.");
rtError_t rt_ret = rtModelExit(model_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelExit failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rtModelExit failed, ret: 0x%x", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtModelExit failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtModelExit] failed, ret:0x%x", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGI("ModelExitTaskInfo Distribute Success.");


+4 -4  ge/graph/load/model_manager/task_info/profiler_trace_task_info.cc

@@ -24,7 +24,7 @@ Status ProfilerTraceTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *
GELOGI("ProfilerTraceTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -47,9 +47,9 @@ Status ProfilerTraceTaskInfo::Distribute() {

rtError_t rt_ret = rtProfilerTrace(log_id_, notify_, flat_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtProfilerTrace failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtProfilerTrace failed, ret:0x%X, logid:%lu. notify:%d",
rt_ret, log_id_, notify_);
GELOGE(RT_FAILED, "[Call][RtProfilerTrace] failed, ret:0x%X, logid:%lu. notify:%d", rt_ret, log_id_, notify_);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+11 -11  ge/graph/load/model_manager/task_info/stream_active_task_info.cc

@@ -27,7 +27,7 @@ Status StreamActiveTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
GELOGI("StreamActiveTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -46,10 +46,10 @@ Status StreamActiveTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
GE_CHECK_NOTNULL(op_desc);
std::vector<uint32_t> active_stream_index_list;
if (!AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_index_list)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "StreamActiveOp get attr ACTIVE_STREAM fail, node name:%s.", op_desc->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in op:%s(%s) fail", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

@@ -57,8 +57,8 @@ Status StreamActiveTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
REPORT_INNER_ERROR("E19999", "flowctrl index:%u >= active_stream_list size:%zu in op:%s(%s), "
"check invalid", internal_index, active_stream_index_list.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "InitStreamSwitchTaskInfo stream id index invalid. index:%u, list size:%zu.", internal_index,
active_stream_index_list.size());
GELOGE(INTERNAL_ERROR, "[Check][Param] stream id index invalid. index:%u, list size:%zu, op:%s(%s).",
internal_index, active_stream_index_list.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

@@ -66,8 +66,9 @@ Status StreamActiveTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
REPORT_INNER_ERROR("E19999", "active_stream_index:%u in op:%s(%s) >= stream size:%zu in model, "
"check invalid", active_stream_index_list[internal_index],
op_desc->GetName().c_str(), op_desc->GetType().c_str(), davinci_model->GetStreamList().size());
GELOGE(INTERNAL_ERROR, "InitStreamSwitchTaskInfo stream index invalid. index:%u, stream list size:%zu.",
active_stream_index_list[internal_index], davinci_model->GetStreamList().size());
GELOGE(INTERNAL_ERROR, "[Check][Param] active_stream_index:%u in op:%s(%s) >= stream size:%zu in model",
active_stream_index_list[internal_index], op_desc->GetName().c_str(), op_desc->GetType().c_str(),
davinci_model->GetStreamList().size());
return INTERNAL_ERROR;
}

@@ -83,9 +84,8 @@ Status StreamActiveTaskInfo::Distribute() {
GELOGI("StreamActiveTaskInfo Distribute Start.");
rtError_t rt_ret = rtStreamActive(active_stream_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamActive failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtStreamActive failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtStreamActive] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}



+24 -21  ge/graph/load/model_manager/task_info/stream_switch_task_info.cc

@@ -32,7 +32,7 @@ Status StreamSwitchTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
GELOGI("StreamSwitchTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr");
GELOGE(PARAM_INVALID, "davinci_model is null!");
GELOGE(PARAM_INVALID, "[Check][Param] davinci_model is null!");
return PARAM_INVALID;
}

@@ -50,10 +50,10 @@ Status StreamSwitchTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
SetInputAndValuePtr(davinci_model, input_data_addr);
uint32_t cond = 0;
if (!AttrUtils::GetInt(op_desc, ATTR_NAME_STREAM_SWITCH_COND, cond)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_STREAM_SWITCH_COND.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_STREAM_SWITCH_COND.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "StreamSwitchOp get attr STREAM_SWITCH_COND fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in op:%s(%s) fail",
ATTR_NAME_STREAM_SWITCH_COND.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}
cond_ = static_cast<rtCondition_t>(cond);
@@ -63,17 +63,18 @@ Status StreamSwitchTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
REPORT_INNER_ERROR("E19999", "input_data_addr.size():%zu or input size:%zu != STREAM_SWITCH_INPUT_NUM:%u "
"in op:%s(%s), check invalid", input_data_addr.size(), input_size,
STREAM_SWITCH_INPUT_NUM, op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Input num should be %u. inputAddr size:%zu, inputDesc size:%zu.",
STREAM_SWITCH_INPUT_NUM, input_data_addr.size(), input_size);
GELOGE(INTERNAL_ERROR, "[Check][Param] Input num should be %u. inputAddr size:%zu, inputDesc size:%zu, op:%s(%s).",
STREAM_SWITCH_INPUT_NUM, input_data_addr.size(), input_size,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

vector<uint32_t> active_stream_list;
if (!AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "StreamSwitchOp get attr ACTIVE_STREAM_LIST fail.");
GELOGE(INTERNAL_ERROR, "[Get][Attr] %s in op:%s(%s) fail",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return INTERNAL_ERROR;
}

@@ -81,17 +82,19 @@ Status StreamSwitchTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
REPORT_INNER_ERROR("E19999", "active_stream_list.size():%zu in op:%s(%s) != kTrueBranchStreamNum:%u, "
"check invalid", active_stream_list.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), kTrueBranchStreamNum);
GELOGE(FAILED, "Stream num of switch true branch must be %u.", kTrueBranchStreamNum);
GELOGE(FAILED, "[Check][Param] active_stream_list.size():%zu in op:%s(%s) must be equal %u",
active_stream_list.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), kTrueBranchStreamNum);
return FAILED;
}

size_t true_stream_index = active_stream_list.front();
if (true_stream_index >= davinci_model->GetStreamList().size()) {
REPORT_INNER_ERROR("E19999", "active_stream_index:%zu in op:%s(%s) >= stream list size:%zu in model,"
"check invalid", true_stream_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), davinci_model->GetStreamList().size());
GELOGE(INTERNAL_ERROR, "InitStreamSwitchTaskInfo stream index invalid. index:%zu, stream list size:%zu.",
true_stream_index, davinci_model->GetStreamList().size());
"check invalid", true_stream_index, op_desc->GetName().c_str(), op_desc->GetType().c_str(),
davinci_model->GetStreamList().size());
GELOGE(INTERNAL_ERROR, "[Check][Param] active_stream_index:%zu in op:%s(%s) >= stream list size:%zu in model",
true_stream_index, op_desc->GetName().c_str(), op_desc->GetType().c_str(),
davinci_model->GetStreamList().size());
return INTERNAL_ERROR;
}

@@ -103,10 +106,10 @@ Status StreamSwitchTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *d
if (op_desc->HasAttr(ATTR_NAME_SWITCH_DATA_TYPE)) {
int64_t data_type = 0;
if (!AttrUtils::GetInt(op_desc, ATTR_NAME_SWITCH_DATA_TYPE, data_type)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_SWITCH_DATA_TYPE.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_SWITCH_DATA_TYPE.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "StreamSwitchOp[node:%s] get attr SWITCH_DATA_TYPE fail.", op_desc->GetName().c_str());
GELOGE(FAILED, "[Get][Attr] %s in op:%s(%s) fail",
ATTR_NAME_SWITCH_DATA_TYPE.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}
data_type_ = static_cast<rtSwitchDataType_t>(data_type);
@@ -122,9 +125,8 @@ Status StreamSwitchTaskInfo::Distribute() {
GELOGI("StreamSwitchTaskInfo Distribute Start.");
rtError_t rt_ret = rtStreamSwitchEx(input_ptr_, cond_, value_ptr_, true_stream_, stream_, data_type_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamSwitchEx fail, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtStreamSwitchEx fail, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtStreamSwitchEx] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -143,7 +145,8 @@ Status StreamSwitchTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davinc
REPORT_INNER_ERROR("E19999", "input size:%zu in op:%s(%s) != STREAM_SWITCH_INPUT_NUM:%u,"
"check invalid", op_desc->GetInputsSize(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), STREAM_SWITCH_INPUT_NUM);
GELOGE(FAILED, "Stream switch op only have one data input. Now input size is %zu", op_desc->GetInputsSize());
GELOGE(FAILED, "[Check][Param] Stream switch op:%s only have one data input. Now input size is %zu",
op_desc->GetName().c_str(), op_desc->GetInputsSize());
return FAILED;
}
for (uint32_t i = 0; i < STREAM_SWITCH_INPUT_NUM; ++i) {


+24 -24  ge/graph/load/model_manager/task_info/stream_switchn_task_info.cc

@@ -36,9 +36,8 @@ Status StreamSwitchNTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *
auto stream_switchn_def = task_def.stream_switch_n();
OpDescPtr op_desc = davinci_model->GetOpByIndex(stream_switchn_def.op_index());
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u",
stream_switchn_def.op_index());
GELOGE(FAILED, "Index is out of range, index: %u", stream_switchn_def.op_index());
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", stream_switchn_def.op_index());
GELOGE(FAILED, "[Get][Op] failed, as Index is out of range, index:%u", stream_switchn_def.op_index());
return FAILED;
}

@@ -51,7 +50,8 @@ Status StreamSwitchNTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *
REPORT_INNER_ERROR("E19999", "task_Def.stream_switch_n.target_value:%d in op:%s(%s) is 0,"
"check invalid", value.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "The number of gears in dynamic batch scenario can not be 0.");
GELOGE(FAILED, "[Check][Param] The number of gears in dynamic batch scenario can not be 0, op:%s.",
op_desc->GetName().c_str());
return FAILED;
}
for (int i = 0; i < value.size(); ++i) {
@@ -62,15 +62,15 @@ Status StreamSwitchNTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *

// set element_size_
if (!AttrUtils::GetInt(op_desc, ATTR_NAME_BATCH_NUM, element_size_)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_BATCH_NUM.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_BATCH_NUM.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Get ATTR_NAME_BATCH_NUM of switchN op failed.");
GELOGE(FAILED, "[Get][Attr] %s in op:%s(%s) fail",
ATTR_NAME_BATCH_NUM.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

if (GetTrueStreamPtr(op_desc, davinci_model) != SUCCESS) {
GELOGE(FAILED, "Get true stream ptr of switchN op failed.");
GELOGE(FAILED, "[Get][TrueStreamPtr] of switchN op:%s failed.", op_desc->GetName().c_str());
return FAILED;
}

@@ -92,9 +92,8 @@ Status StreamSwitchNTaskInfo::Distribute() {
rtError_t rt_ret =
rtStreamSwitchN(input_ptr_, input_size_, value_ptr_, true_stream_ptr_, element_size_, stream_, data_type_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamSwitchN failed, ret:0x%X",
rt_ret);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtStreamSwitchN failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtStreamSwitchN] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

@@ -106,10 +105,10 @@ Status StreamSwitchNTaskInfo::Distribute() {
Status StreamSwitchNTaskInfo::GetTrueStreamPtr(const OpDescPtr &op_desc, DavinciModel *davinci_model) {
vector<uint32_t> true_stream_id_list;
if (!AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, true_stream_id_list)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "StreamSwitchNOp get attr ACTIVE_STREAM_LIST fail.");
GELOGE(FAILED, "[Get][Attr] %s in op:%s(%s) fail",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED;
}

@@ -118,9 +117,8 @@ Status StreamSwitchNTaskInfo::GetTrueStreamPtr(const OpDescPtr &op_desc, Davinci
"check invalid", true_stream_id_list.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), davinci_model->GetStreamList().size());
GELOGE(FAILED,
"InitStreamSwitchNTaskInfo get true stream id list failed. true stream size:%zu, "
"stream list size:%zu.",
true_stream_id_list.size(), davinci_model->GetStreamList().size());
"[Check][Param] InitStreamSwitchNTaskInfo get true stream id list failed. true stream size:%zu, "
"stream list size:%zu.", true_stream_id_list.size(), davinci_model->GetStreamList().size());
return FAILED;
}

@@ -131,8 +129,8 @@ Status StreamSwitchNTaskInfo::GetTrueStreamPtr(const OpDescPtr &op_desc, Davinci
REPORT_INNER_ERROR("E19999", "active_stream_id:%u in op:%s(%s) >= stream list size:%zu in model,"
"check invalid", true_stream_id,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), davinci_model->GetStreamList().size());
GELOGE(FAILED, "InitStreamSwitchNTaskInfo stream id invalid. id:%u, stream list size:%zu.", true_stream_id,
davinci_model->GetStreamList().size());
GELOGE(FAILED, " [Check][Param] stream id:%u in op:%s invalid, stream list size:%zu.",
true_stream_id, op_desc->GetName().c_str(), davinci_model->GetStreamList().size());
return FAILED;
}
rtStream_t true_stream = davinci_model->GetStreamList()[true_stream_id];
@@ -144,7 +142,7 @@ Status StreamSwitchNTaskInfo::GetTrueStreamPtr(const OpDescPtr &op_desc, Davinci
REPORT_INNER_ERROR("E19999", "active_stream_list.size():%zu in op:%s(%s) is empty, "
"check invalid", true_stream_id_list.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "true stream list is null.");
GELOGE(FAILED, "[Check][Param] true stream list is null, op:%s.", op_desc->GetName().c_str());
return FAILED;
}
true_stream_ptr_ = &true_stream_list_[0];
@@ -160,10 +158,11 @@ Status StreamSwitchNTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davin
GE_CHECK_NOTNULL(op_desc);
GELOGI("Calc opType[%s] args size. Node name is [%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
if (op_desc->GetInputsSize() != kStreamSwitchnInputNum) {
REPORT_INNER_ERROR("E19999", "input size:%zu in op:%s(%s) != kStreamSwitchnInputNum:%u ,"
REPORT_INNER_ERROR("E19999", "input size:%zu in op:%s(%s) != kStreamSwitchnInputNum:%u, "
"check invalid", op_desc->GetInputsSize(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), kStreamSwitchnInputNum);
GELOGE(FAILED, "Stream switchn op only have one data input. Now input size is %zu", op_desc->GetInputsSize());
GELOGE(FAILED, "[Check][Param] Stream switchn op:%s only have one data input. Now input size is %zu",
op_desc->GetName().c_str(), op_desc->GetInputsSize());
return FAILED;
}
string input_tensor_name = op_desc->GetInputNameByIndex(0);
@@ -187,7 +186,8 @@ Status StreamSwitchNTaskInfo::InputPtrUpdate(const OpDescPtr &op_desc, DavinciMo
REPORT_INNER_ERROR("E19999", "input_offset size:%zu or input_length.size:%zu in op:%s(%s) is empty,"
"check invalid", input_offset.size(), input_legnth.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "input offset size %zu, input legnth size: %zu", input_offset.size(), input_legnth.size());
GELOGE(FAILED, "[Check][Param] op:%s input offset size %zu, input legnth size:%zu",
op_desc->GetName().c_str(), input_offset.size(), input_legnth.size());
return FAILED;
}
const RuntimeParam &rts_param = davinci_model->GetRuntimeParam();
@@ -201,7 +201,7 @@ Status StreamSwitchNTaskInfo::InputPtrUpdate(const OpDescPtr &op_desc, DavinciMo
REPORT_INNER_ERROR("E19999", "input_data_addr size:%zu in op:%s(%s) is empty,"
"check invalid", input_data_addr.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "input data addr is empty");
GELOGE(FAILED, "[Check][Param] input data addr is empty in op:%s", op_desc->GetName().c_str());
return FAILED;
}
input_ptr_ = input_data_addr[0];


+5 -7  ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc

@@ -27,23 +27,21 @@ Status SuperKernel::Launch(rtStream_t stream, uint32_t dump_flag) {

rtError_t rt_ret = rtMalloc(reinterpret_cast<void **>(&device_args_addr_), sizeof(args), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%lu, ret:0x%X",
sizeof(args), rt_ret);
GELOGE(RT_FAILED, "rtMalloc failied. error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%lu, ret:0x%X", sizeof(args), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failied, size:%lu, ret:0x%X", sizeof(args), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(reinterpret_cast<void *>(device_args_addr_), sizeof(args), reinterpret_cast<void *>(args),
sizeof(args), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X",
sizeof(args), rt_ret);
GELOGE(RT_FAILED, "rtMemcpy failied. error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X", sizeof(args), rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failied, size:%lu, ret:0x%X", sizeof(args), rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtKernelLaunchWithFlag((void *const)func_stub_, block_dim_, device_args_addr_, sizeof(args), NULL, stream,
dump_flag);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag failed, dump_flag:%u, ret:0x%X",
dump_flag, rt_ret);
GELOGE(RT_FAILED, "rtKernelLaunchWithFlag failied. error: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtKernelLaunchWithFlag] failied. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
return SUCCESS;
}
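
The Launch path above is an allocate-copy-launch sequence against the runtime API. A reduced sketch of that flow, using the same rt* calls that appear in this file; the helper name, parameter types and plain if-checks are illustrative (the original wraps each call in GE_IF_BOOL_EXEC with REPORT_CALL_ERROR/GELOGE), and the include path is assumed:

  #include "runtime/rt.h"  // assumed runtime header providing rtMalloc/rtMemcpy/rtKernelLaunchWithFlag

  rtError_t LaunchWithDeviceArgs(void *func_stub, uint32_t block_dim, void *host_args,
                                 uint64_t args_size, rtStream_t stream, uint32_t dump_flag) {
    void *device_args = nullptr;
    rtError_t rt_ret = rtMalloc(&device_args, args_size, RT_MEMORY_HBM);  // device buffer for args
    if (rt_ret != RT_ERROR_NONE) { return rt_ret; }
    rt_ret = rtMemcpy(device_args, args_size, host_args, args_size, RT_MEMCPY_HOST_TO_DEVICE);
    if (rt_ret != RT_ERROR_NONE) { (void)rtFree(device_args); return rt_ret; }
    // Null sm_desc, as in SuperKernel::Launch; device_args stays alive for the async launch.
    return rtKernelLaunchWithFlag(func_stub, block_dim, device_args, static_cast<uint32_t>(args_size),
                                  nullptr, stream, dump_flag);
  }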


+9 -11  ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc

@@ -33,20 +33,20 @@ Status SuperKernelFactory::Init() {
if (handle_ == nullptr) {
const char* error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
GELOGE(FAILED, "SKT: open skt lib failed, please check LD_LIBRARY_PATH. errmsg:%s", error);
GELOGE(FAILED, "[Open][SktLib] failed, please check LD_LIBRARY_PATH. errmsg:%s", error);
}
rtError_t rt_ret;
rt_ret = rtGetFunctionByName(this->sk_stub_name_.c_str(), &this->func_stub_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName failed, stub_func:%s, ret:0x%X",
this->sk_stub_name_.c_str(), rt_ret);
GELOGE(RT_FAILED, "rtGetFunctionByName failed. stub_func: %s, please export LD_LIBRARY_PATH for "
"libcce_aicore.so", this->sk_stub_name_.c_str());
GELOGE(RT_FAILED, "[Call][RtGetFunctionByName] failed. stub_func:%s, "
"please export LD_LIBRARY_PATH for libcce_aicore.so", this->sk_stub_name_.c_str());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtGetAddrByFun(this->func_stub_, &this->func_ptr_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetAddrByFun failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtGetAddrByFun] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
GELOGD(
"SKT: fuseKernels super_kernel_template subFunc %p, device func "
@@ -104,7 +104,7 @@ Status SuperKernelFactory::FuseKernels(const std::vector<void *> &stub_func_list
rt_ret = rtGetAddrByFun(stub_func_list[i], &sub_device_func);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetAddrByFun failed, ret:0x%X", rt_ret);
GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
GELOGE(RT_FAILED, "[Call][RtGetAddrByFun] failed, ret:0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
GELOGD("SKT: fuseKernels subFunc %p, device func address %p", stub_func_list[i], sub_device_func);
// store two uint64_t address
@@ -116,16 +116,14 @@ Status SuperKernelFactory::FuseKernels(const std::vector<void *> &stub_func_list
}
rt_ret = rtMalloc(reinterpret_cast<void **>(&hbm_nav_table_addr), nav_table_size, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%lu, ret:0x%X",
nav_table_size, rt_ret);
GELOGE(RT_FAILED, "rtMalloc failed. error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%lu, ret:0x%X", nav_table_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMalloc] failed, size:%lu, ret:0x%X", nav_table_size, rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(reinterpret_cast<void *>(hbm_nav_table_addr), nav_table_size,
reinterpret_cast<void *>(nav_table.get()), nav_table_size, RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X",
nav_table_size, rt_ret);
GELOGE(RT_FAILED, "rtMemcpy failed. error: 0x%X", rt_ret);
REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X", nav_table_size, rt_ret);
GELOGE(RT_FAILED, "[Call][RtMemcpy] failed, size:%lu, ret:0x%X", nav_table_size, rt_ret);
GE_CHK_RT(rtFree(hbm_nav_table_addr)); return RT_ERROR_TO_GE_STATUS(rt_ret);)
// Create the necessary metadata for the super kernel
h =


+1 -1  ge/graph/load/model_manager/task_info/task_info.cc

@@ -27,7 +27,7 @@ Status TaskInfo::SetStream(uint32_t stream_id, const std::vector<rtStream_t> &st
} else {
REPORT_INNER_ERROR("E19999", "stream_id:%u >= stream_list.size(): %zu, check invalid",
stream_id, stream_list.size());
GELOGE(FAILED, "index: %u >= stream_list.size(): %zu.", stream_id, stream_list.size());
GELOGE(FAILED, "[Check][Param] index:%u >= stream_list.size():%zu.", stream_id, stream_list.size());
return FAILED;
}



+18 -2  ge/graph/load/model_manager/task_info/task_info.h

@@ -18,6 +18,7 @@
#define GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_TASK_INFO_H_

#include <vector>
#include <sstream>

#include "cce/customize.h"
#include "framework/common/taskdown_common.h"
@@ -28,9 +29,11 @@

namespace ge {
struct MemInfo {
uint64_t memory_size = 0;
size_t memory_size = 0;
uint64_t logic_memory_base = 0;
uint8_t *memory_base = nullptr;
uint32_t memory_type = RT_MEMORY_HBM;
std::string memory_key = "";
};

struct RuntimeParam {
@@ -40,6 +43,19 @@ struct RuntimeParam {
}
~RuntimeParam() = default;

std::string ToString() {
std::stringstream ss;
ss << "session_id:" << session_id << ", stream_num:" << stream_num << ", event_num:" << event_num
<< ", label_num:" << label_num << ", logic_mem_base:" << logic_mem_base
<< ", logic_weight_base:" << logic_weight_base << ", logic_var_base:" << logic_var_base
<< ", memory_size:" << mem_size << ", weight_size:" << weight_size << ", var_size:" << var_size
<< ", ex_memory_info:";
for (auto it : memory_infos) {
ss << "[memory_type:" << it.first << ", memory_size:" << it.second.memory_size << "]";
}
return ss.str();
}

uint64_t mem_size = 0;
uint64_t logic_mem_base = 0;
uint8_t *mem_base = nullptr;
@@ -49,7 +65,7 @@ struct RuntimeParam {
uint64_t var_size = 0;
uint64_t logic_var_base = 0;
uint8_t *var_base = nullptr;
std::map<uint32_t, MemInfo> memory_infos;
std::map<uint64_t, MemInfo> memory_infos;
uint32_t batch_num = 0;
uint32_t stream_num = 0;
uint32_t event_num = 0;
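
A small usage sketch for the ToString() helper added above, assuming a RuntimeParam populated by the model loader; the field values and the memory-type key are illustrative:

  ge::RuntimeParam runtime_param;
  runtime_param.session_id = 0;
  runtime_param.mem_size = 0x100000;           // 1 MiB, illustrative
  ge::MemInfo extra_mem;
  extra_mem.memory_size = 0x1000;              // size_t after this change
  runtime_param.memory_infos[1] = extra_mem;   // map key is the memory type (now uint64_t)
  GELOGI("Runtime param: %s", runtime_param.ToString().c_str());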


+3 -3  ge/graph/manager/graph_caching_allocator.cc

@@ -21,7 +21,7 @@
#include <utility>

#include "framework/common/debug/ge_log.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_mem_manager.h"

namespace ge {
const size_t bin_ranges[kNumBins] = {kRoundBlockSize * kKByteSize,
@@ -117,7 +117,7 @@ Status CachingAllocator::Initialize(uint32_t device_id) {
}
free_block_bins_[i] = bin_ptr;
}
memory_allocator_ = MemManager::Instance(memory_type_);
memory_allocator_ = &MemManager::Instance().MemInstance(memory_type_);
if (memory_allocator_ == nullptr) {
return ACL_ERROR_GE_INTERNAL_ERROR;
}
@@ -168,7 +168,7 @@ Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) {
if (it == allocated_blocks_.end()) {
REPORT_INNER_ERROR("E19999", "Param ptr not allocated before, device_id:%u, check invalid",
device_id);
GELOGE(PARAM_INVALID, "Invalid memory pointer");
GELOGE(PARAM_INVALID, "Invalid memory pointer: %p", ptr);
return ge::PARAM_INVALID;
}
Block *block = it->second;


+1 -1  ge/graph/manager/graph_caching_allocator.h

@@ -88,8 +88,8 @@ class CachingAllocator {
///
/// @ingroup ge_graph
/// @brief free memory
/// @param [in] memory_ptr memory address ptr
/// @param [in] device_id device id
/// @param [out] memory_ptr memory address ptr
/// @return Status result of function
///
Status Free(uint8_t *memory_addr, uint32_t device_id = 0);


+184 -70  ge/graph/manager/graph_manager.cc

@@ -65,6 +65,7 @@
#include "graph/passes/merge_pass.h"
#include "graph/passes/merge_input_memcpy_pass.h"
#include "graph/passes/merge_to_stream_merge_pass.h"
#include "graph/passes/mark_force_unknown_for_cond_pass.h"
#include "graph/passes/multi_batch_pass.h"
#include "graph/passes/next_iteration_pass.h"
#include "graph/passes/permute_pass.h"
@@ -106,6 +107,8 @@
#include "graph/common/omg_util.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "register/custom_pass_helper.h"
#include "external/graph/types.h"
#include "common/util/error_manager/error_manager.h"

namespace {
const char *const kSummary = "Summary";
@@ -126,6 +129,7 @@ const uint32_t kNotAdded = 0;
const uint32_t kStartAdd = 1;
const uint32_t kDoneAdded = 2;
const uint32_t kNeverLoaded = 0;
const size_t kAlignment = 64;

bool IsTailingOptimization() {
string is_tailing_optimization_option;
@@ -368,9 +372,9 @@ void GraphManager::RemoveAddGraphCondition(GraphId graph_id) {
auto it = graph_id_to_add_graph_cond_.find(graph_id);
if (it != graph_id_to_add_graph_cond_.end()) {
graph_id_to_add_graph_cond_.erase(it);
GELOGD("Successfully removed add_graph_cond of graph [id:%u].", graph_id);
GELOGD("Successfully remove add_graph_cond of graph [id:%u].", graph_id);
} else {
GELOGD("Graph [id:%u] has not been added. no need to remove.", graph_id);
GELOGD("Graph [id:%u] has not been added, no need to be removed.", graph_id);
}
}

@@ -463,6 +467,48 @@ Status GraphManager::SetStagesOptions(uint32_t graph_id, const GraphManagerOptio
return SUCCESS;
}

Status GraphManager::ModifyDataIndex(const Graph &graph, const std::map<std::string, std::string> &graph_option) {
vector<OpDescPtr> data_desc;
set<int64_t> indexes;
auto compute_graph = GraphUtils::GetComputeGraph(graph);
GE_CHECK_NOTNULL(compute_graph);
for (auto &input_node : compute_graph->GetDirectNode()) {
GE_CHECK_NOTNULL(input_node);
auto op = input_node->GetOpDesc();
GE_CHECK_NOTNULL(op);
if (op->GetType() == DATA) {
int64_t index = 0;
(void) AttrUtils::GetInt(op, ATTR_NAME_INDEX, index);
indexes.insert(index);
data_desc.emplace_back(op);
}
}
if (!indexes.empty()) {
auto first_iter = indexes.begin();
auto end_iter = indexes.end();
--end_iter;
auto data_size = static_cast<int64_t>(data_desc.size());
// Valid indexes start at 0 and increase by 1, and their count must equal the number of Data nodes.
if (indexes.size() != data_desc.size() || *first_iter != 0 || *end_iter != data_size - 1) {
auto iter = graph_option.find(OPTION_EXEC_DATA_INPUTS_SHAPE_RANGE);
if (iter != graph_option.end() && !iter->second.empty()) {
// If data inputs shape range is set, user must set valid data index.
std::string failed_reason = "Data index must be set continuous from 0 when data shape range enabled!";
REPORT_INPUT_ERROR("E10003", std::vector<std::string>({"parameter", "value", "reason"}),
std::vector<std::string>({"--data_index", "-", failed_reason}));
GELOGE(GRAPH_PARAM_INVALID, "[COMP][AddGraph]Input data index is invalid when data shape range enabled.");
return GRAPH_PARAM_INVALID;
}
GELOGI("Graph[%s] input data index is invalid, set data index by topo order.", compute_graph->GetName().c_str());
int64_t index = 0;
for (auto &op : data_desc) {
(void) AttrUtils::SetInt(op, ATTR_NAME_INDEX, index++);
}
}
}
return SUCCESS;
}
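
ModifyDataIndex above accepts user-set Data indexes only when they form the exact sequence 0..N-1 for N Data nodes; otherwise the graph is rejected if shape ranges are configured, or the indexes are renumbered in topological order. A standalone sketch of that continuity test, with a hypothetical helper name:

  #include <cstddef>
  #include <cstdint>
  #include <set>

  // True when the collected ATTR_NAME_INDEX values are exactly 0 .. data_node_count - 1.
  bool DataIndexesContinuous(const std::set<int64_t> &indexes, size_t data_node_count) {
    if (indexes.empty() || indexes.size() != data_node_count) {
      return false;
    }
    return *indexes.begin() == 0 &&
           *indexes.rbegin() == static_cast<int64_t>(data_node_count) - 1;
  }
  // {0, 1, 2} with three Data nodes passes; {0, 2, 3} fails and triggers renumbering.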

Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
const std::map<std::string, std::string> &options,
const OmgContext &omg_context) {
@@ -492,9 +538,13 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
}
// Do add graph
SetAddGraphCondition(graph_id, kStartAdd);
if (CheckGraphAdded(graph_id, graph) != SUCCESS) {
GELOGE(FAILED, "AddGraph failed.");
return FAILED;
}
GE_CHK_STATUS_RET(ModifyDataIndex(graph, options));
auto compute_graph = GraphUtils::GetComputeGraph(graph);
GE_CHECK_NOTNULL(compute_graph);
compute_graph->SetGraphID(graph_id);
(void)AttrUtils::SetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, true);
SetSessionGraphId(compute_graph, graph_id);

@@ -534,7 +584,7 @@ Status GraphManager::CheckGraphAdded(const GraphId &graph_id, const Graph &graph
bool graph_has_been_added = false;
if (AttrUtils::GetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, graph_has_been_added)
&& graph_has_been_added) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s from graph:%u fail",
REPORT_INNER_ERROR("E19999", "Get Attr:%s from graph:%u fail.",
ATTR_NAME_GRAPH_HAS_BEEN_ADDED.c_str(), graph_id);
GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST,
"[GraphManager] same graph object can not be added again, graph_id = %u.", graph_id);
@@ -552,6 +602,10 @@ Status GraphManager::CheckGraphAdded(const GraphId &graph_id, const Graph &graph
Status GraphManager::AddGraphWithCopy(const GraphId &graph_id, const Graph &graph,
const std::map<std::string, std::string> &options,
const OmgContext &omg_context) {
if (HasGraphNode(graph_id)) {
GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST, "[GraphManager] graph exists, graph_id = %u", graph_id);
return GE_GRAPH_GRAPH_ALREADY_EXIST;
}
if (CheckGraphAdded(graph_id, graph) != SUCCESS) {
GELOGE(FAILED, "AddGraphWithCopy failed.");
return FAILED;
@@ -889,7 +943,7 @@ Status GraphManager::PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node,
}

Status GraphManager::SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id) {
GELOGD("set rt_context: session id: %lu, graph id: %u, mode %d, device id:%u.",
GELOGD("Set rt_context: session id: %lu, graph id: %u, mode %d, device id:%u.",
session_id, graph_id, static_cast<int>(mode), ge::GetContext().DeviceId());

rtError_t rt_ret = rtCtxCreate(&rt_context, mode, ge::GetContext().DeviceId());
@@ -935,7 +989,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector<Ge
GE_CHK_STATUS_RET(analyzer_instance->BuildJsonObject(session_id, compute_graph->GetGraphID()),
"BuildJsonObject Failed")

GEEVENT("PreRun start: graph node size %zu, session id %lu, graph id %u, graph name %s",
GEEVENT("PreRun start: graph node size %zu, session id %lu, graph id %u, graph name %s.",
compute_graph->GetDirectNodesSize(), session_id, compute_graph->GetGraphID(),
compute_graph->GetName().c_str());
GE_DUMP(compute_graph, "PreRunBegin");
@@ -956,7 +1010,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector<Ge
if (run_optimize_original_graph) {
Status ret = PreRunOptimizeOriginalGraph(graph_node, inputs, compute_graph, session_id);
if (ret != SUCCESS) {
GELOGE(ret, "Run PreRunOptimizeOriginalGraph failed for graph:%s", compute_graph->GetName().c_str());
GELOGE(ret, "Run PreRunOptimizeOriginalGraph failed for graph:%s.", compute_graph->GetName().c_str());
return ret;
}
}
@@ -1051,7 +1105,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
// release rts generate context
RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId());
if (ret != SUCCESS) {
GELOGE(ret, "PreRun Failed. graph_id:%u.", graph_node->GetGraphId());
GELOGE(ret, "PreRun Failed, graph_id:%u.", graph_node->GetGraphId());
return ret;
}
}
@@ -1102,6 +1156,7 @@ Status GraphManager::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphN
GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node));
}
}
ge_root_model->SetIsSpecificStream(graph_node->IsSpecificStream());
GE_TIMESTAMP_START(LoadGraph);
Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, model_listener);
GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraph");
@@ -1225,6 +1280,78 @@ Status GraphManager::InnerRunGraph(GraphNodePtr &graph_node, const GraphId &grap
return SUCCESS;
}

Status GraphManager::InnerRunGraphWithStream(GraphNodePtr &graph_node, const GraphId &graph_id, rtStream_t stream,
const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
auto ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Run][GraphWithStreamAsync] set condition failed, "
"graph id = %u, stream = %p.", graph_id, stream);
graph_node->SetRunFlag(false);
return GE_GRAPH_RUNGRAPH_FAILED;
}

ret = graph_executor_.ExecuteGraphWithStream(graph_id, stream, graph_node->GetGeRootModel(), inputs, outputs);
graph_node->SetRunFlag(false);
graph_node->SetIsSpecificStream(false);
if (ret != SUCCESS) {
GELOGE(ret, "[Run][GraphWithStreamAsync] execute graph failed, graph id = %u, stream = %p.", graph_id, stream);
return ret;
}
GELOGI("[Run][GraphWithStreamAsync] run graph success, graph id = %u, stream = %p.", graph_id, stream);
return SUCCESS;
}

Status GraphManager::RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t stream, uint64_t session_id,
const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
std::lock_guard<std::mutex> lock(run_mutex_);
GELOGI("Start to run graph with stream async, graph id = %u, stream = %p.", graph_id, stream);

if (inputs.empty()) {
GELOGI("Run graph with stream async, initialize sub graph has no inputs.");
}

// find graph
GraphNodePtr graph_node = nullptr;
Status ret = GetGraphNode(graph_id, graph_node);
if (ret != SUCCESS) {
REPORT_INNER_ERROR("E19999", "graph id = %u not exist in graph_map, check invalid.", graph_id);
GELOGE(ret, "Run graph with stream async graph not exist, graph id = %u.", graph_id);
return ret;
}
if (graph_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Graph node is nullptr in graph_map, graph id = %u, check invalid.", graph_id);
GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "Run graph with stream async graph node is NULL, graph id = %u.", graph_id);
return GE_GRAPH_GRAPH_NODE_NULL;
}
if (graph_node->GetRunFlag()) {
REPORT_INNER_ERROR("E19999", "Graph is already running, can't be run again, graph id = %u, "
"check invalid.", graph_id);
GELOGE(GE_GRAPH_ALREADY_RUNNING, "Run graph with stream async graph already running, graph id = %u.", graph_id);
return GE_GRAPH_ALREADY_RUNNING;
}

UpdateLocalOmgContext(graph_id);
// set graph's run flag
graph_node->SetRunFlag(true);
graph_node->SetIsSpecificStream(true);
ComputeGraphPtr compute_graph_tmp = GraphUtils::GetComputeGraph(*(graph_node->GetGraph()));

// when set incre build, add cache helper map
AddModelCacheHelperToMap(graph_id, session_id, compute_graph_tmp);
if (options_.local_fmk_op_flag) {
GetCompilerStages(graph_id).optimizer.TranFrameOp(compute_graph_tmp);
}
GeRootModelPtr ge_root_model = nullptr;
ret = StartForRunGraph(graph_node, inputs, ge_root_model, session_id);
if (ret != SUCCESS) {
GELOGE(ret, "[Run][GraphWithStreamAsync] StartForRunGraph failed!");
graph_node->SetRunFlag(false);
return ret;
}
return InnerRunGraphWithStream(graph_node, graph_id, stream, inputs, outputs);
}
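
A caller-side sketch for the new interface, matching the signature declared in graph_manager.h further below; graph_manager, graph_id, session_id and stream are placeholders supplied by the caller, and the graph is assumed to have been added already:

  std::vector<ge::GeTensor> inputs;   // may be empty, e.g. for an initialize-only graph
  std::vector<ge::GeTensor> outputs;
  ge::Status ret = graph_manager.RunGraphWithStreamAsync(graph_id, stream, session_id, inputs, outputs);
  if (ret != ge::SUCCESS) {
    GELOGE(ret, "Run graph with stream async failed, graph id = %u.", graph_id);
  }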

Status GraphManager::RunGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
std::vector<GeTensor> &outputs, uint64_t session_id) {
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
@@ -1664,7 +1791,8 @@ Status GraphManager::ParseOptions(const std::map<std::string, std::string> &opti
return GE_GRAPH_OPTIONS_INVALID);

// ge.graphType
ret = ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
ret =
ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
GE_IF_BOOL_EXEC(ret != SUCCESS,
GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.runFlag value is invalid");
return GE_GRAPH_OPTIONS_INVALID);
@@ -1706,20 +1834,18 @@ Status GraphManager::ParseOptions(const std::map<std::string, std::string> &opti
return SUCCESS;
}

Status GraphManager::ParseTrainGraphFlag(bool &options, bool &option) {
Status GraphManager::ParseTrainGraphFlag(const bool &run_flag, bool &train_flag) {
std::shared_ptr<GELib> ge_instance_ptr = ge::GELib::GetInstance();
if (ge_instance_ptr == nullptr) {
GELOGW("[Initialize] set train_graph_flag to 0 when GE is not initialized or finalized");
option = false;
train_flag = false;
} else if (!ge_instance_ptr->isTrainMode()) {
option = false;
train_flag = false;
} else { // ge_instance_ptr->isTrainMode() is true
if (!options) {
GELOGE(GE_GRAPH_OPTIONS_INVALID,
"Key:ge.runFlag, its value %d is invalid, it must be 1 when GElib::is_train_mode_ flag is 1", options);
return GE_GRAPH_OPTIONS_INVALID;
train_flag = true;
if (!run_flag) {
GELOGW("Key:ge.runFlag, its value %d is invalid, it must be 1 when GElib::is_train_mode_ flag is 1", run_flag);
}
option = true;
}
return SUCCESS;
}
@@ -2455,7 +2581,9 @@ Status GraphManager::OptimizeStage1(ge::ComputeGraphPtr &compute_graph) {
// the prune pass should between SwitchPass and SwitchToStreamSwitchPass
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::Migration", new (std::nothrow) SubgraphConstMigrationPass));
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::ArgsClean", new (std::nothrow) UnusedArgsCleanPass));
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::PrunePass", new (std::nothrow) PrunePass))
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::PrunePass", new (std::nothrow) PrunePass));
auto mark_force_unknown_pass = new (std::nothrow) MarkForceUnknownForCondPass;
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::MarkForceUnknownForCondPass", mark_force_unknown_pass));
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::NextIterationPass", new (std::nothrow) NextIterationPass))
GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::ControlTriggerPass", new (std::nothrow) ControlTriggerPass))
GE_CHK_STATUS_RET(
@@ -2863,7 +2991,7 @@ Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager
}

// run graph async on session
Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector<ge::InputTensorInfo> &inputs,
Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector<ge::Tensor> &inputs,
uint64_t session_id, RunAsyncCallback callback) {
ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute);
GELOGI("[GraphManager] Start to run graph async, graph_id=%u, inputsSize=%zu.", graph_id, inputs.size());
@@ -2935,14 +3063,6 @@ Status GraphManager::IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_m
return FAILED;
}

void GraphManager::ConstructGeInput(const vector<InputTensorInfo> &inputs, vector<GeTensor> &ge_inputs) {
for (auto const &input : inputs) {
GeTensorDesc input_tensor_desc(GeShape(input.dims));
input_tensor_desc.SetDataType(static_cast<ge::DataType>(input.data_type));
ge_inputs.emplace_back(input_tensor_desc);
}
}

Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args,
GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) {
if (!graph_manager->IsGraphNeedBuild(graph_node)) {
@@ -2961,7 +3081,9 @@ Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const
GeModelPtr ge_model = nullptr;
if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) {
std::vector<GeTensor> ge_inputs;
ConstructGeInput(args.input_tensor, ge_inputs);
for (const auto &item: args.input_tensor) {
ge_inputs.emplace_back(TensorAdapter::AsGeTensor(item));
}
Status ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id);
// release rts generate context
RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId());
@@ -3073,20 +3195,19 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
}
}

void GraphManager::ParseInputsDimsForData(const std::vector<InputTensorInfo> &input_tensor) {
void GraphManager::ParseInputsDimsForData(const std::vector<ge::Tensor> &input_tensor) {
GELOGD("Start parse input dims from data.");
for (size_t i = 0; i < input_tensor.size(); ++i) {
std::vector<int64_t> dynamic_dim;
for (size_t j = 0; j < input_tensor[i].dims.size(); ++j) {
dynamic_dim.emplace_back(input_tensor[i].dims[j]);
}
GELOGD("Input tensor dims is %s.", formats::JoinToString(dynamic_dim).c_str());
GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor[i].dims);
const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc();
const Shape &shape = tensor_desc.GetShape();
const auto &shape_dims = shape.GetDims();
GELOGD("Input tensor dims is %s.", formats::JoinToString(shape_dims).c_str());
GetLocalOmgContext().user_real_input_dims.emplace_back(shape_dims);
}
}
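
The migration above replaces InputTensorInfo with ge::Tensor, so the dims that used to be read straight from input_tensor[i].dims now come through the tensor descriptor. Side by side for one tensor (old member on the commented line, new accessor chain below it):

  // before: std::vector<int64_t> dims = input_tensor[i].dims;
  const ge::TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc();
  const ge::Shape &shape = tensor_desc.GetShape();
  const std::vector<int64_t> dims = shape.GetDims();
  GELOGD("Input tensor dims is %s.", formats::JoinToString(dims).c_str());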

Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector<NodePtr> &dynamic_nodes,
const std::vector<InputTensorInfo> &input_tensor) {
const std::vector<ge::Tensor> &input_tensor) {
GELOGD("Start parse inputs dims when coexist data and getnext sink.");
for (size_t i = 0; i < dynamic_nodes.size(); ++i) {
auto op_desc = dynamic_nodes.at(i)->GetOpDesc();
@@ -3109,13 +3230,16 @@ Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector<NodePtr>
return PARAM_INVALID;
}

GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor.at(index).dims);
GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(input_tensor.at(index).dims).c_str());
const TensorDesc &tensor_desc = input_tensor.at(index).GetTensorDesc();
const Shape &shape = tensor_desc.GetShape();
const auto &shape_dims = shape.GetDims();
GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(shape_dims).c_str());
GetLocalOmgContext().user_real_input_dims.emplace_back(std::move(shape_dims));
}
return SUCCESS;
}

Status GraphManager::ParseInputsDims(const std::vector<InputTensorInfo> &input_tensor) {
Status GraphManager::ParseInputsDims(const std::vector<ge::Tensor> &input_tensor) {
GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size());
GetLocalOmgContext().user_real_input_dims.clear();
if (!GetLocalOmgContext().dynamic_node_type.empty()) {
@@ -3246,13 +3370,13 @@ void GraphManager::ReturnError(GraphManager *graph_manager, RunAsyncCallback cal
}
StopQueue(graph_manager);
GELOGE(ret, "%s.", log.c_str());
std::vector<ge::OutputTensorInfo> outputs;
std::vector<ge::Tensor> outputs;
callback(ret, outputs);
}

void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node,
RunAsyncCallback callback, Status ret, const string &log) {
std::vector<ge::OutputTensorInfo> outputs;
void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback,
Status ret, const string &log) {
std::vector<ge::Tensor> outputs;
auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph());
if (graph_manager == nullptr || compute_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Param graph_manager or compute_graph in graph_node is nullptr, "
@@ -3268,9 +3392,10 @@ void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_
}
for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); i++) {
auto input_desc = node->GetOpDesc()->MutableInputDesc(i);
ge::OutputTensorInfo tensor;
tensor.dims = input_desc->GetShape().GetDims();
tensor.data_type = static_cast<uint32_t>(input_desc->GetDataType());
GeShape ge_shape(input_desc->GetShape().GetDims());
GeTensorDesc ge_tensor_desc;
ge_tensor_desc.SetShape(ge_shape);
GeTensor ge_tensor(ge_tensor_desc);
int64_t len = 1;
if (input_desc->GetShape().GetDims() != std::vector<int64_t>({})) {
len = input_desc->GetShape().GetShapeSize();
@@ -3286,30 +3411,19 @@ void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_
GELOGI("getted shape size is 0.Do process as empty tensor!");
len = 1;
}
auto size = GetSizeByDataType(input_desc->GetDataType());
if (size <= 0) {
REPORT_INNER_ERROR("E19999", "data_type:%s of op:%s(%s) is not support, input_index:%zu check invalid",
ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str(),
node->GetName().c_str(), node->GetType().c_str(), i);
GELOGE(PARAM_INVALID, "Failed to get cube size, the data type %s is invalid",
ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str());
callback(GRAPH_FAILED, outputs);
auto length = GetSizeInBytes(len, input_desc->GetDataType());
auto aligned_ptr = MakeShared<AlignedPtr>(length, kAlignment);
if (aligned_ptr == nullptr) {
REPORT_INNER_ERROR("E19999", "Aligned_ptr is nullptr");
GELOGE(GRAPH_FAILED, "[Analyze Mode] Aligned_ptr is nullptr");
return;
}
if (CheckInt64MulOverflow(len, static_cast<int64_t>(size)) != true) {
REPORT_INNER_ERROR("E19999", "shape_size:%ld of op:%s(%s) will overflow after multiply by "
"size:%u of data_type:%s, input_index:%zu, check invalid", len,
node->GetName().c_str(), node->GetType().c_str(), size,
ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str(), i);
GELOGE(MEMALLOC_FAILED, "int64 multiply happens overflow! a:%ld b:%d", len, size);
callback(GRAPH_FAILED, outputs);
return;
}
tensor.length = len * size;
tensor.data.reset(new(std::nothrow) uint8_t[tensor.length]);
ge_tensor.SetData(aligned_ptr, length);
ge::Tensor tensor = TensorAdapter::AsTensor(ge_tensor);
// To avoid global step too small and can not stop, totally set a bigger value
for (int64_t i = 0; i < tensor.length; i++) {
tensor.data[i] = 0x7F; // here stands for a positive max value
auto ptr = aligned_ptr->MutableGet();
for (int64_t i = 0; i < length; i++) {
ptr[i] = 0x7F; // here stands for a positive max value
}
outputs.emplace_back(std::move(tensor));
}
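
The replacement code above sizes the fake output from the element count and data type, backs it with a 64-byte-aligned buffer (kAlignment, added at the top of this file), and fills every byte with 0x7F so a consumer such as a global-step counter reads a large positive value. A reduced sketch of that construction, with an illustrative element count and data type:

  int64_t len = 8;                              // element count, illustrative
  auto length = GetSizeInBytes(len, DT_INT64);  // byte size for the chosen data type
  auto aligned_ptr = MakeShared<AlignedPtr>(length, kAlignment);
  if (aligned_ptr != nullptr) {
    auto ptr = aligned_ptr->MutableGet();
    for (int64_t i = 0; i < length; ++i) {
      ptr[i] = 0x7F;                            // positive max byte pattern, as above
    }
    GeTensorDesc desc;
    desc.SetShape(GeShape({len}));
    GeTensor ge_tensor(desc);
    ge_tensor.SetData(aligned_ptr, length);     // wrap the sentinel buffer in a GeTensor
  }
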
@@ -3657,7 +3771,7 @@ void GraphManager::UpdateLocalOmgContext(GraphId graph_id) {
if (iter != omg_contexts_.end()) {
SetLocalOmgContext(iter->second);
} else {
GELOGW("OmgContext of graph %u not found.", graph_id);
GELOGW("OmgContext of graph %u is not found.", graph_id);
}
}

@@ -3687,9 +3801,9 @@ void GraphManager::RemoveGraphCount(GraphId graph_id) {
std::lock_guard<std::mutex> lock(graph_count_mutex_);
auto it = graph_count_.find(graph_id);
if (it == graph_count_.end()) {
GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id);
GELOGW("Graph of id: %u has not been added, count cannot be decreased", graph_id);
} else {
GELOGD("RemoveGraphCount success, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
GELOGD("RemoveGraphCount success, graph count of id[%u] is %u", graph_id, graph_count_[graph_id]);
graph_count_.erase(it);
}
}


+25 -9  ge/graph/manager/graph_manager.h

@@ -105,6 +105,19 @@ class GraphManager {

///
/// @ingroup ge_graph
/// @brief run specific graph with specific session id and stream
/// @param [in] graph_id graph id
/// @param [in] stream specific stream
/// @param [in] session_id session id
/// @param [in] inputs input data
/// @param [out] outputs output data
/// @return Status result of function
///
Status RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t stream, uint64_t session_id,
const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs);

///
/// @ingroup ge_graph
/// @brief build specific graph
/// @param [in] graph_id graph id
/// @param [in] inputs input data
@@ -149,9 +162,8 @@ class GraphManager {
/// @param [out] callback: callback while run graph async finish
/// @return Status result of function
///
Status RunGraphAsync(const GraphId &graph_id, const std::vector<ge::InputTensorInfo> &inputs,
Status RunGraphAsync(const GraphId &graph_id, const std::vector<ge::Tensor> &inputs,
uint64_t session_id, RunAsyncCallback callback);

///
/// @ingroup ge_graph
/// @brief me register the callback function to get the result of summary or checkpoint
@@ -208,7 +220,7 @@ class GraphManager {

struct PreRunArgs {
GraphId graph_id;
std::vector<ge::InputTensorInfo> input_tensor;
std::vector<ge::Tensor> input_tensor;
uint64_t session_id;
struct error_message::Context error_context;
GEThreadLocalContext context;
@@ -220,7 +232,7 @@ class GraphManager {
GraphId graph_id;
uint64_t session_id;
struct error_message::Context error_context;
std::vector<ge::InputTensorInfo> input_tensor;
std::vector<ge::Tensor> input_tensor;
GeRootModelPtr ge_root_model;
GEThreadLocalContext context;
RunAsyncCallback callback;
@@ -239,10 +251,10 @@ class GraphManager {
uint64_t session_id,
const struct error_message::Context &error_context,
const GEThreadLocalContext &ge_context);
Status ParseInputsDims(const std::vector<InputTensorInfo> &input_tensor);
void ParseInputsDimsForData(const std::vector<InputTensorInfo> &input_tensor);
Status ParseInputsDims(const std::vector<ge::Tensor> &input_tensor);
void ParseInputsDimsForData(const std::vector<ge::Tensor> &input_tensor);
Status ParseInputsDimsForGetNexNosinkAndData(const vector<NodePtr> &dynamic_nodes,
const std::vector<InputTensorInfo> &input_tensor);
const std::vector<ge::Tensor> &input_tensor);
Status RunCustomPass(const GraphNodePtr &graph_node);
Status PreRun(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs, GeRootModelPtr &ge_root_model,
uint64_t session_id = INVALID_SESSION_ID);
@@ -258,6 +270,9 @@ class GraphManager {
Status InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id, const std::vector<GeTensor> &inputs,
std::vector<GeTensor> &outputs);

Status InnerRunGraphWithStream(GraphNodePtr &graph_node, const GraphId &graph_id, rtStream_t stream,
const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs);

Status ParseOptions(const std::map<std::string, std::string> &options);

static void ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
@@ -277,7 +292,7 @@ class GraphManager {

static Status ParseParallelNum(const std::string &parallel_num, const std::string &key, int &num);

static Status ParseTrainGraphFlag(bool &options, bool &option);
static Status ParseTrainGraphFlag(const bool &run_flag, bool &train_flag);

static bool IsPerfLevelInvalid(int32_t perf_level);

@@ -353,7 +368,6 @@ class GraphManager {
void RemoveModelCacheHelper(const GraphId &graph_id);
ModelCacheHelperPtr FindModelCacheHelper(GraphId graph_id);

static void ConstructGeInput(const std::vector<InputTensorInfo> &inputs, std::vector<GeTensor> &ge_inputs);
static void PreRunThread(GraphManager *graph_manager);
static void RunThread(GraphManager *graph_manager);
static void StopQueue(GraphManager *graph_manager);
@@ -413,6 +427,8 @@ class GraphManager {

void SetSessionGraphId(ComputeGraphPtr compute_graph, uint32_t graph_id);

Status ModifyDataIndex(const Graph &graph, const std::map<std::string, std::string> &graph_option);

static Status CheckGraphAdded(const GraphId &graph_id, const Graph &graph);

std::atomic_bool thread_run_flag_;
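
A rough usage sketch for the stream-based entry point declared above. Only the RunGraphWithStreamAsync signature comes from this diff; the GraphManager instance, graph_id, session_id and the rtStreamCreate call are assumed for illustration:

// Hypothetical sketch: run a loaded graph on a caller-owned runtime stream.
rtStream_t stream = nullptr;
if (rtStreamCreate(&stream, 0) != RT_ERROR_NONE) {  // assumed Ascend runtime API
  return FAILED;
}
std::vector<GeTensor> inputs;   // prepared by the caller
std::vector<GeTensor> outputs;  // filled by GraphManager
Status ret = graph_manager.RunGraphWithStreamAsync(graph_id, stream, session_id, inputs, outputs);
if (ret != SUCCESS) {
  GELOGE(ret, "RunGraphWithStreamAsync failed, graph_id:%u.", graph_id);
}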


+ 3  - 2    ge/graph/manager/graph_manager_utils.cc

@@ -41,6 +41,7 @@ GraphNode::GraphNode(GraphId graph_id)
build_flag_(false),
load_flag_(false),
async_(false),
is_specific_stream_(false),
ge_model_(nullptr),
sem_(1) {
graph_run_async_listener_ = MakeShared<RunAsyncListener>();
@@ -113,7 +114,7 @@ GraphModelListener::GraphModelListener(std::mutex &mutex, std::condition_variabl
: result_code_(0), is_finished_(false), mutex_(mutex), condition_(cond) {}

Status GraphModelListener::OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result,
std::vector<ge::OutputTensorInfo> &outputs) {
std::vector<ge::Tensor> &outputs) {
GELOGI(
"[GraphManager] graph compute call back, model_id:%u, task_id:%u, "
"resultCode:%u.",
@@ -150,7 +151,7 @@ void RunAsyncListener::SetCallback(const RunAsyncCallback &callback) {
}

Status RunAsyncListener::OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result,
std::vector<ge::OutputTensorInfo> &outputs) {
std::vector<ge::Tensor> &outputs) {
GELOGI("[GraphManager] run graph async call back, modelId:%u, taskId:%u, resultCode:%u.",
model_id, task_id, result);
GE_CHECK_NOTNULL(callback_);
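
For reference, a minimal listener written against the updated callback signature; the class name and log text are illustrative, only the OnComputeDone prototype is taken from the diff above:

// Illustrative listener consuming the new std::vector<ge::Tensor> outputs.
class SampleRunListener : public ge::ModelListener {
 public:
  Status OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result,
                       std::vector<ge::Tensor> &outputs) override {
    GELOGI("Compute done, model_id:%u, task_id:%u, result:%u, output num:%zu.",
           model_id, task_id, result, outputs.size());
    return SUCCESS;
  }
};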


+ 5  - 2    ge/graph/manager/graph_manager_utils.h

@@ -130,7 +130,7 @@ class RunAsyncListener : public ge::ModelListener {

// callback
Status OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result,
std::vector<ge::OutputTensorInfo> &outputs) override;
std::vector<ge::Tensor> &outputs) override;

private:
RunAsyncCallback callback_;
@@ -167,6 +167,8 @@ class GraphNode {
void UpdateLoadFlag() { load_flag_ = load_count_ == 0 || load_record_ >= kMaxLoadNum; }
void SetLoadFlag(bool load_flag) { load_flag_ = load_flag; }
void SetGeModel(const GeModelPtr &ge_model) { ge_model_ = ge_model; }
void SetIsSpecificStream(bool specific_stream) { is_specific_stream_ = specific_stream; }
bool IsSpecificStream() const { return is_specific_stream_; }
GeModelPtr GetGeModel() const { return ge_model_; }
void SetGeRootModel(const GeRootModelPtr &ge_root_model) { ge_root_model_ = ge_root_model; }
GeRootModelPtr GetGeRootModel() const { return ge_root_model_; }
@@ -200,6 +202,7 @@ class GraphNode {
// load_flag_ is true if more than one model has been loaded
bool load_flag_;
bool async_;
bool is_specific_stream_;
GeModelPtr ge_model_;
GeRootModelPtr ge_root_model_;
BlockingQueue<uint8_t> sem_;
@@ -221,7 +224,7 @@ class GraphModelListener : public ge::ModelListener {

// callback
Status OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result,
std::vector<ge::OutputTensorInfo> &outputs) override;
std::vector<ge::Tensor> &outputs) override;

Status ResetResult();



+ 3  - 113    ge/graph/manager/graph_mem_allocator.cc

@@ -17,11 +17,9 @@
#include "graph/manager/graph_mem_allocator.h"

#include <string>
#include "graph/manager/graph_caching_allocator.h"
#include "graph/manager/rdma_pool_allocator.h"
#include "graph/manager/host_mem_allocator.h"

namespace ge {
void MemoryAllocator::Initialize(uint32_t device_id) {
Status MemoryAllocator::Initialize(uint32_t device_id) {
GELOGI("MemoryAllocator::Initialize");

// when redo Initialize free memory
@@ -31,6 +29,7 @@ void MemoryAllocator::Initialize(uint32_t device_id) {
}
}
memory_base_map_.clear();
return SUCCESS;
}

void MemoryAllocator::Finalize(uint32_t device_id) {
@@ -152,113 +151,4 @@ uint8_t *MemoryAllocator::GetMemoryAddr(const string &memory_key, uint32_t devic

return it->second.memory_addr_;
}

MemManager::MemManager() {}

MemManager::~MemManager() { Finalize(); }

MemManager &MemManager::Instance() {
static MemManager mem_manager;
return mem_manager;
}

MemoryAllocator *MemManager::Instance(rtMemType_t memory_type) { return Instance().GetMemoryAllocator(memory_type); }

Status MemManager::Initialize(const std::vector<rtMemType_t> &memory_type) {
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
MemoryAllocator *memory_allocator = nullptr;
for (unsigned int index : memory_type) {
auto it = memory_allocator_map_.find(index);
if (it == memory_allocator_map_.end()) {
memory_allocator = new (std::nothrow) MemoryAllocator(index);

if (memory_allocator != nullptr) {
memory_allocator_map_[index] = memory_allocator;
GELOGI("Create MemoryAllocator memory type[%u] success.", index);
} else {
REPORT_CALL_ERROR("E19999", "New MemoryAllocator fail, index:%u", index);
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc MemoryAllocator failed.");
}
} else {
memory_allocator = it->second;
}

if (memory_allocator == nullptr) {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Create MemoryAllocator failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} else {
memory_allocator->Initialize(0);
}
}

auto ret = InitAllocator(memory_type, caching_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create CachingAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, rdma_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create RdmaAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, host_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create HostMemAllocator failed.");
return ret;
}
return SUCCESS;
}

template <typename T>
void FinalizeAllocatorMap(std::map<rtMemType_t, T *> &allocate_map) {
for (auto &allocator : allocate_map) {
if (allocator.second != nullptr) {
allocator.second->Finalize();
delete allocator.second;
allocator.second = nullptr;
}
}
allocate_map.clear();
}

void MemManager::Finalize() noexcept {
GELOGI("Finalize.");
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
// caching and rdma allocator use memory allocator, so finalize them first
FinalizeAllocatorMap(caching_allocator_map_);
FinalizeAllocatorMap(rdma_allocator_map_);
FinalizeAllocatorMap(host_allocator_map_);
FinalizeAllocatorMap(memory_allocator_map_);
}

MemoryAllocator *MemManager::GetMemoryAllocator(rtMemType_t memory_type) {
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
MemoryAllocator *memory_allocator = nullptr;
auto it = memory_allocator_map_.find(memory_type);
if (it != memory_allocator_map_.end()) {
memory_allocator = it->second;
}

// Usually impossible
if (memory_allocator == nullptr) {
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "GetMemoryAllocator failed, memory type is %u.", memory_type);
static MemoryAllocator default_memory_allocator(RT_MEMORY_RESERVED);
return &default_memory_allocator;
}

return memory_allocator;
}

CachingAllocator &MemManager::CachingInstance(rtMemType_t memory_type) {
return Instance().GetAllocator(memory_type, caching_allocator_map_);
}

RdmaPoolAllocator &MemManager::RdmaPoolInstance(rtMemType_t memory_type) {
return Instance().GetAllocator(memory_type, rdma_allocator_map_);
}
HostMemAllocator &MemManager::HostMemInstance(rtMemType_t memory_type) {
return Instance().GetAllocator(memory_type, host_allocator_map_);
}
} // namespace ge

+ 2  - 106    ge/graph/manager/graph_mem_allocator.h

@@ -26,7 +26,6 @@

#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"
#include "graph/manager/host_mem_allocator.h"
#include "graph/node.h"
#include "runtime/mem.h"

@@ -71,9 +70,9 @@ class MemoryAllocator {
/// @ingroup ge_graph
/// @brief memory allocator init
/// @param [in] options user config params
/// @return void
/// @return Status of init
///
void Initialize(uint32_t device_id = 0);
Status Initialize(uint32_t device_id = 0);

///
/// @ingroup ge_graph
@@ -136,109 +135,6 @@ class MemoryAllocator {
bool mem_malloced_;
map<string, MemoryInfo> memory_base_map_;
};

using MemoryAllocatorPtr = std::shared_ptr<MemoryAllocator>;
class CachingAllocator;
class RdmaPoolAllocator;
class MemManager {
public:
MemManager();
virtual ~MemManager();
static MemManager &Instance();
static MemoryAllocator *Instance(rtMemType_t memory_type);
CachingAllocator &CachingInstance(rtMemType_t memory_type);
RdmaPoolAllocator &RdmaPoolInstance(rtMemType_t memory_type);
HostMemAllocator &HostMemInstance(rtMemType_t memory_type);
MemManager(const MemManager &) = delete;
MemManager &operator=(const MemManager &) = delete;
///
/// @ingroup ge_graph
/// @brief memory allocator manager init
/// @param [in] options user config params
/// @return Status result of function
///
Status Initialize(const std::vector<rtMemType_t> &memory_type);

///
/// @ingroup ge_graph
/// @brief memory allocator finalize
/// @return void
///
void Finalize() noexcept;

private:
///
/// @ingroup ge_graph
/// @brief ge memory allocator
/// @param [in] memory_type memory type
/// @return MemoryAllocator ptr
///
MemoryAllocator *GetMemoryAllocator(rtMemType_t memory_type);

///
/// @ingroup ge_graph
/// @param [in] memory_type memory type
/// @param [in] allocate_map memory allocator map
/// @return Status result of function
///
template <typename T>
Status InitAllocator(const std::vector<rtMemType_t> &memory_type, std::map<rtMemType_t, T *> &allocate_map) {
T *allocator = nullptr;
for (unsigned int index : memory_type) {
auto it = allocate_map.find(index);
if (it == allocate_map.end()) {
allocator = new (std::nothrow) T(index);
if (allocator != nullptr) {
allocate_map[index] = allocator;
GELOGI("Create Allocator memory type[%u] success.", index);
} else {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc Allocator failed.");
}
} else {
allocator = it->second;
}

if (allocator == nullptr) {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Create Allocator failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} else {
if (allocator->Initialize() != SUCCESS) {
return ACL_ERROR_GE_INTERNAL_ERROR;
}
}
}
return SUCCESS;
}
///
/// @ingroup ge_graph
/// @param [in] memory_type memory type
/// @param [in] allocate_map memory allocator map
/// @return Allocator ptr
///
template <typename T>
T &GetAllocator(rtMemType_t memory_type, std::map<rtMemType_t, T *> allocate_map) {
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
T *allocator = nullptr;
auto it = allocate_map.find(memory_type);
if (it != allocate_map.end()) {
allocator = it->second;
}

// Usually impossible
if (allocator == nullptr) {
GELOGW("Get allocator failed, memory type is %u.", memory_type);
static T default_allocator(RT_MEMORY_RESERVED);
return default_allocator;
}
return *allocator;
}

std::map<rtMemType_t, MemoryAllocator *> memory_allocator_map_;
std::map<rtMemType_t, CachingAllocator *> caching_allocator_map_;
std::map<rtMemType_t, RdmaPoolAllocator *> rdma_allocator_map_;
std::map<rtMemType_t, HostMemAllocator *> host_allocator_map_;
std::recursive_mutex allocator_mutex_;
};
} // namespace ge

#endif // GE_GRAPH_MANAGER_GRAPH_MEM_ALLOCATOR_H_
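
Because Initialize now reports a Status instead of returning void, callers are expected to check the result; a minimal sketch, assuming RT_MEMORY_HBM is an available rtMemType_t value:

// Sketch only: propagate the Status from MemoryAllocator::Initialize instead of ignoring it.
MemoryAllocator allocator(RT_MEMORY_HBM);
if (allocator.Initialize(0) != SUCCESS) {
  GELOGE(FAILED, "MemoryAllocator initialize failed.");
  return FAILED;
}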

+ 116  - 0    ge/graph/manager/graph_mem_manager.cc

@@ -0,0 +1,116 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "graph/manager/graph_mem_manager.h"

#include <string>

namespace ge {
MemManager::MemManager() {}

MemManager::~MemManager() { Finalize(); }

MemManager &MemManager::Instance() {
static MemManager mem_manager;
return mem_manager;
}

Status MemManager::Initialize(const std::vector<rtMemType_t> &memory_type) {
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
if (init_) {
GELOGW("MemManager has been inited.");
return SUCCESS;
}

auto ret = InitAllocator(memory_type, memory_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create MemoryAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, caching_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create CachingAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, rdma_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create RdmaAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, host_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create HostMemAllocator failed.");
return ret;
}

ret = InitAllocator(memory_type, session_scope_allocator_map_);
if (ret != SUCCESS) {
GELOGE(ret, "Create HostMemAllocator failed.");
return ret;
}
init_ = true;
memory_type_ = memory_type;
return SUCCESS;
}

template <typename T>
void FinalizeAllocatorMap(std::map<rtMemType_t, T *> &allocate_map) {
for (auto &allocator : allocate_map) {
if (allocator.second != nullptr) {
allocator.second->Finalize();
delete allocator.second;
allocator.second = nullptr;
}
}
allocate_map.clear();
}

void MemManager::Finalize() noexcept {
GELOGI("Finalize.");
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
// caching and rdma allocator use memory allocator, so finalize them first
FinalizeAllocatorMap(session_scope_allocator_map_);
FinalizeAllocatorMap(caching_allocator_map_);
FinalizeAllocatorMap(rdma_allocator_map_);
FinalizeAllocatorMap(host_allocator_map_);
FinalizeAllocatorMap(memory_allocator_map_);
init_ = false;
memory_type_.clear();
}

MemoryAllocator &MemManager::MemInstance(rtMemType_t memory_type) {
return GetAllocator(memory_type, memory_allocator_map_);
}

CachingAllocator &MemManager::CachingInstance(rtMemType_t memory_type) {
return GetAllocator(memory_type, caching_allocator_map_);
}

RdmaPoolAllocator &MemManager::RdmaPoolInstance(rtMemType_t memory_type) {
return GetAllocator(memory_type, rdma_allocator_map_);
}

HostMemAllocator &MemManager::HostMemInstance(rtMemType_t memory_type) {
return GetAllocator(memory_type, host_allocator_map_);
}

SessionScopeMemAllocator &MemManager::SessionScopeMemInstance(rtMemType_t memory_type) {
return GetAllocator(memory_type, session_scope_allocator_map_);
}
} // namespace ge
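
A hedged usage sketch of the relocated MemManager singleton; RT_MEMORY_HBM is assumed to be one of the configured rtMemType_t values and error handling is abbreviated:

// Sketch: initialize the manager once, then fetch per-type allocators on demand.
std::vector<rtMemType_t> memory_types = {RT_MEMORY_HBM};
if (MemManager::Instance().Initialize(memory_types) != SUCCESS) {
  return FAILED;
}
MemoryAllocator &mem_allocator = MemManager::Instance().MemInstance(RT_MEMORY_HBM);
CachingAllocator &caching_allocator = MemManager::Instance().CachingInstance(RT_MEMORY_HBM);
// ... allocate and free through the returned references ...
MemManager::Instance().Finalize();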

+ 141  - 0    ge/graph/manager/graph_mem_manager.h

@@ -0,0 +1,141 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef GE_GRAPH_MANAGER_GRAPH_MEM_MANAGER_H_
#define GE_GRAPH_MANAGER_GRAPH_MEM_MANAGER_H_

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_caching_allocator.h"
#include "graph/manager/host_mem_allocator.h"
#include "graph/manager/rdma_pool_allocator.h"
#include "graph/manager/host_mem_allocator.h"
#include "graph/manager/session_scope_mem_allocator.h"
#include "graph/node.h"
#include "runtime/mem.h"

namespace ge {
using MemoryAllocatorPtr = std::shared_ptr<MemoryAllocator>;

class MemManager {
public:
MemManager();
virtual ~MemManager();
static MemManager &Instance();
MemoryAllocator &MemInstance(rtMemType_t memory_type);
CachingAllocator &CachingInstance(rtMemType_t memory_type);
RdmaPoolAllocator &RdmaPoolInstance(rtMemType_t memory_type);
HostMemAllocator &HostMemInstance(rtMemType_t memory_type);
SessionScopeMemAllocator &SessionScopeMemInstance(rtMemType_t memory_type);
MemManager(const MemManager &) = delete;
MemManager &operator=(const MemManager &) = delete;
///
/// @ingroup ge_graph
/// @brief memory allocator manager init
/// @param [in] options user config params
/// @return Status result of function
///
Status Initialize(const std::vector<rtMemType_t> &memory_type);

///
/// @ingroup ge_graph
/// @brief memory allocator finalize
/// @return void
///
void Finalize() noexcept;

const std::vector<rtMemType_t> &GetAllMemoryType() const { return memory_type_; }

private:
///
/// @ingroup ge_graph
/// @param [in] memory_type memory type
/// @param [in] allocate_map memory allocator map
/// @return Status result of function
///
template <typename T>
Status InitAllocator(const std::vector<rtMemType_t> &memory_type, std::map<rtMemType_t, T *> &allocate_map) {
T *allocator = nullptr;
for (unsigned int index : memory_type) {
auto it = allocate_map.find(index);
if (it == allocate_map.end()) {
allocator = new (std::nothrow) T(index);
if (allocator != nullptr) {
allocate_map[index] = allocator;
GELOGI("Create Allocator memory type[%u] success.", index);
} else {
REPORT_CALL_ERROR("E19999", "New MemoryAllocator fail, index:%u", index);
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc Allocator failed.");
}
} else {
allocator = it->second;
}

if (allocator == nullptr) {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Create Allocator failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} else {
if (allocator->Initialize() != SUCCESS) {
return ACL_ERROR_GE_INTERNAL_ERROR;
}
}
}
return SUCCESS;
}
///
/// @ingroup ge_graph
/// @param [in] memory_type memory type
/// @param [in] allocate_map memory allocator map
/// @return Allocator ptr
///
template <typename T>
T &GetAllocator(rtMemType_t memory_type, std::map<rtMemType_t, T *> allocate_map) {
std::lock_guard<std::recursive_mutex> lock(allocator_mutex_);
T *allocator = nullptr;
auto it = allocate_map.find(memory_type);
if (it != allocate_map.end()) {
allocator = it->second;
}

// Usually impossible
if (allocator == nullptr) {
GELOGW("Get allocator failed, memory type is %u.", memory_type);
static T default_allocator(RT_MEMORY_RESERVED);
return default_allocator;
}
return *allocator;
}

std::map<rtMemType_t, MemoryAllocator *> memory_allocator_map_;
std::map<rtMemType_t, CachingAllocator *> caching_allocator_map_;
std::map<rtMemType_t, RdmaPoolAllocator *> rdma_allocator_map_;
std::map<rtMemType_t, HostMemAllocator *> host_allocator_map_;
std::map<rtMemType_t, SessionScopeMemAllocator *> session_scope_allocator_map_;
std::recursive_mutex allocator_mutex_;
std::vector<rtMemType_t> memory_type_;
bool init_ = false;
};
} // namespace ge

#endif // GE_GRAPH_MANAGER_GRAPH_MEM_MANAGER_H_
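
The header also exposes the configured type list through GetAllMemoryType; an illustrative loop over it, touching the new session-scope allocator (no allocation API is assumed beyond what the header declares):

// Illustrative: visit every configured memory type and its session-scope allocator.
for (rtMemType_t memory_type : MemManager::Instance().GetAllMemoryType()) {
  SessionScopeMemAllocator &allocator = MemManager::Instance().SessionScopeMemInstance(memory_type);
  (void)allocator;  // real code would allocate or free session-scoped blocks here
}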

Some files were not shown because too many files changed in this diff
