diff --git a/ge/graph/load/model_manager/model_utils.cc b/ge/graph/load/model_manager/model_utils.cc
index f593f67b..d8b36933 100755
--- a/ge/graph/load/model_manager/model_utils.cc
+++ b/ge/graph/load/model_manager/model_utils.cc
@@ -22,16 +22,25 @@
 #include "graph/manager/graph_var_manager.h"
 #include "graph/types.h"
 #include "graph/build/memory/block_mem_assigner.h"
-
-#define VALIDATE_MEM_RANGE(OP, SIZE, OFFSET)                                                      \
-  do {                                                                                            \
-    if (SIZE <= static_cast<uint64_t>(OFFSET)) {                                                  \
-      REPORT_INNER_ERROR("E19999", "Node:%s(%s) offset:%ld out of range size:%lu, check invalid", \
-                         OP->GetName().c_str(), OP->GetType().c_str(), OFFSET, SIZE);             \
-      GELOGE(OUT_OF_MEMORY, "[Check][Param]Node: %s, memory out of range[%lu: %ld]",              \
-             OP->GetName().c_str(), SIZE, OFFSET);                                                \
-      return {};                                                                                  \
-    }                                                                                             \
+#include "common/math/math_util.h"
+
+#define VALIDATE_MEM_RANGE(OP, TOTAL_SIZE, OFFSET, SIZE)                                                    \
+  do {                                                                                                      \
+    if (ge::CheckInt64AddOverflow((OFFSET), (SIZE)) != SUCCESS) {                                           \
+      GELOGE(PARAM_INVALID, "Int64 %ld and %ld addition can result in overflow!",                           \
+             static_cast<int64_t>((OFFSET)), static_cast<int64_t>((SIZE)));                                 \
+      return {};                                                                                            \
+    }                                                                                                       \
+    int64_t range = (OFFSET) + (SIZE);                                                                      \
+    if ((TOTAL_SIZE) < static_cast<uint64_t>(range)) {                                                      \
+      REPORT_INNER_ERROR("E19999",                                                                          \
+                         "Node:%s(%s) memory out of range, offset:%ld, size:%ld, exceed total size:%lu.",   \
+                         OP->GetName().c_str(), OP->GetType().c_str(), (OFFSET), (SIZE), (TOTAL_SIZE));     \
+      GELOGE(OUT_OF_MEMORY,                                                                                 \
+             "[Check][Param]Node:%s(%s) memory out of range, offset:%ld, size:%ld, exceed total size:%lu.", \
+             OP->GetName().c_str(), OP->GetType().c_str(), (OFFSET), (SIZE), (TOTAL_SIZE));                 \
+      return {};                                                                                            \
+    }                                                                                                       \
   } while (0)
 
 namespace ge {
@@ -321,14 +330,16 @@ vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, Co
     const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(i));
     GE_IF_BOOL_EXEC(tensor_desc == nullptr,
                     GELOGD("Op: %s, Index: %zu, has no input", op_desc->GetName().c_str(), i);
                     continue;)
+    int64_t tensor_size = 0;
+    GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
     if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
       // TBE: add weights address to input
-      int64_t tensor_size = 0;
-      GE_CHK_STATUS(TensorUtils::GetSize(*tensor_desc, tensor_size));
       if (tensor_size) {
         int64_t data_offset = 0;
         GE_CHK_STATUS(TensorUtils::GetDataOffset(*tensor_desc, data_offset));
-        VALIDATE_MEM_RANGE(op_desc, model_param.weight_size, data_offset);
+        int64_t weight_size = 0;
+        GE_CHK_STATUS(TensorUtils::GetTensorSizeInBytes(*tensor_desc, weight_size));
+        VALIDATE_MEM_RANGE(op_desc, model_param.weight_size, data_offset, weight_size);
         uint8_t *weight_addr = model_param.weight_base + data_offset;
         v_input_data_addr.push_back(weight_addr);
         GELOGI("[IMAS]GetInputDataAddrs graph_%u type[C] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
@@ -344,7 +355,8 @@ vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, Co
     non_const_index++;
     GE_IF_BOOL_EXEC(model_param.var_size != 0 && ge::VarManager::Instance(session_id)->IsVarAddr(input_offset),
                     uint8_t *variable_addr = nullptr;
-                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, input_offset, variable_addr), return {});
+                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, input_offset,
+                                                  tensor_size, variable_addr), return {});
                     v_input_data_addr.push_back(variable_addr);
                     GELOGI("[IMAS]GetInputDataAddrs graph_%u type[V] name[%s] input[%lu] memaddr[%p]", model_param.graph_id,
                            op_desc->GetName().c_str(), i, variable_addr);
@@ -358,9 +370,7 @@ vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, Co
       mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(input_offset));
       v_input_data_addr.push_back(mem_addr);
     } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
-      int64_t tensor_size = 0;
-      GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
-      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset);
+      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset, tensor_size);
       mem_addr = model_param.ts_mem_mall->Acquire(input_offset, static_cast<uint64_t>(tensor_size));
       v_input_data_addr.push_back(mem_addr);
     } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
@@ -370,7 +380,7 @@ vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, Co
              op_desc->GetName().c_str(), i, p2p_mem_addr);
       continue;
     } else {
-      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset);
+      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset, tensor_size);
       mem_addr = model_param.mem_base + input_offset;
       v_input_data_addr.push_back(mem_addr);
     }
@@ -387,7 +397,7 @@ vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, Co
 /// @return Status
 ///
 Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDescPtr &op_desc, int64_t offset,
-                              uint8_t *&var_addr) {
+                              int64_t tensor_size, uint8_t *&var_addr) {
   rtMemType_t mem_type = ge::VarManager::Instance(model_param.session_id)->GetVarMemType(offset);
   switch (mem_type) {
     case RT_MEMORY_RDMA_HBM:
@@ -399,7 +409,7 @@ Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDesc
       var_addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offset));
       break;
     case RT_MEMORY_HBM:
-      VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base);
+      VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base, tensor_size);
       var_addr = model_param.var_base + offset - model_param.logic_var_base;
       break;
     default:
@@ -451,9 +461,12 @@ vector<void *> ModelUtils::GetOutputDataAddrs(const RuntimeParam &model_param, C
       GELOGD("%s is an optional output, the address don't need to be saved.", tensor_desc->GetName().c_str());
       continue;
     }
+    int64_t tensor_size = 0;
+    GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
     GE_IF_BOOL_EXEC(model_param.var_size != 0 && ge::VarManager::Instance(session_id)->IsVarAddr(v_output_offset[i]),
                     uint8_t *variable_addr = nullptr;
-                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, v_output_offset[i], variable_addr), return {});
+                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, v_output_offset[i],
+                                                  tensor_size, variable_addr), return {});
                     v_output_data_addr.push_back(variable_addr);
                     GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[V] name[%s] output[%zu] memaddr[%p]", model_param.graph_id,
                            op_desc->GetName().c_str(), i, variable_addr);
@@ -467,11 +480,7 @@ vector<void *> ModelUtils::GetOutputDataAddrs(const RuntimeParam &model_param, C
       mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(v_output_offset[i]));
       v_output_data_addr.push_back(mem_addr);
     } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
-      const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
-      GE_CHECK_NOTNULL_EXEC(tensor_desc, return {});
-      int64_t tensor_size = 0;
-      GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
-      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i]);
+      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i], tensor_size);
       mem_addr = model_param.ts_mem_mall->Acquire(v_output_offset[i], static_cast<uint64_t>(tensor_size));
       v_output_data_addr.push_back(mem_addr);
     } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
@@ -481,7 +490,7 @@ vector<void *> ModelUtils::GetOutputDataAddrs(const RuntimeParam &model_param, C
              op_desc->GetName().c_str(), i, p2p_mem_addr);
       continue;
     } else {
-      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i]);
+      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i], tensor_size);
       mem_addr = static_cast<uint8_t *>(model_param.mem_base + v_output_offset[i]);
       v_output_data_addr.push_back(mem_addr);
     }
@@ -554,7 +563,7 @@ vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param
       GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] Null addr",
              model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i]);
     } else {
-      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_workspace_offset[i]);
+      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_workspace_offset[i], v_workspace_bytes[i]);
       uint8_t *mem_addr = nullptr;
       bool session_scope_memory = (has_workspace_no_reuse_scope) && (i < workspace_no_reuse_scope.size());
       if (session_scope_memory) {
diff --git a/ge/graph/load/model_manager/model_utils.h b/ge/graph/load/model_manager/model_utils.h
index 26f8d700..8ce1b060 100755
--- a/ge/graph/load/model_manager/model_utils.h
+++ b/ge/graph/load/model_manager/model_utils.h
@@ -115,7 +115,7 @@ class ModelUtils {
   /// @return Status
   ///
   static Status GetVarAddr(const RuntimeParam &model_param, const ConstOpDescPtr &op_desc, int64_t offset,
-                           uint8_t *&var_addr);
+                           int64_t tensor_size, uint8_t *&var_addr);
 };
 } // namespace ge
diff --git a/tests/ut/ge/graph/load/model_utils_unittest.cc b/tests/ut/ge/graph/load/model_utils_unittest.cc
index 630a75aa..977e3155 100644
--- a/tests/ut/ge/graph/load/model_utils_unittest.cc
+++ b/tests/ut/ge/graph/load/model_utils_unittest.cc
@@ -44,7 +44,7 @@ TEST_F(UtestModelUtils, get_var_addr_hbm) {
   VarManager::Instance(runtime_param.session_id)->var_resource_->var_offset_map_[offset] = RT_MEMORY_HBM;
   std::shared_ptr<OpDesc> op_desc = std::make_shared<OpDesc>("test", "test");
   uint8_t *var_addr = nullptr;
-  EXPECT_EQ(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, var_addr), SUCCESS);
+  EXPECT_EQ(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, 0, var_addr), SUCCESS);
   EXPECT_EQ(runtime_param.var_base + offset - runtime_param.logic_var_base, var_addr);
   VarManager::Instance(runtime_param.session_id)->Destory();
 }
@@ -63,7 +63,7 @@ TEST_F(UtestModelUtils, get_var_addr_rdma_hbm) {
   VarManager::Instance(runtime_param.session_id)->var_resource_->var_offset_map_[offset] = RT_MEMORY_RDMA_HBM;
   std::shared_ptr<OpDesc> op_desc = std::make_shared<OpDesc>("test", "test");
   uint8_t *var_addr = nullptr;
-  EXPECT_EQ(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, var_addr), SUCCESS);
+  EXPECT_EQ(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, 0, var_addr), SUCCESS);
   EXPECT_EQ(reinterpret_cast<uint8_t *>(offset), var_addr);
   VarManager::Instance(runtime_param.session_id)->Destory();
 }
@@ -82,7 +82,7 @@ TEST_F(UtestModelUtils, get_var_addr_rdma_hbm_negative_offset) {
   VarManager::Instance(runtime_param.session_id)->var_resource_->var_offset_map_[offset] = RT_MEMORY_RDMA_HBM;
   std::shared_ptr<OpDesc> op_desc = std::make_shared<OpDesc>("test", "test");
   uint8_t *var_addr = nullptr;
-  EXPECT_NE(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, var_addr), SUCCESS);
+  EXPECT_NE(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, 0, var_addr), SUCCESS);
   VarManager::Instance(runtime_param.session_id)->Destory();
 }
 } // namespace ge