Browse Source

Fixed Coverity warnings (uninitialized locals and potential null-pointer dereferences)

pull/1965/head
李磊 3 years ago
parent
commit
6cb69109b6
6 changed files with 13 additions and 8 deletions
  1. +1
    -0
      ge/graph/load/model_manager/davinci_model.cc
  2. +1
    -1
      ge/hybrid/executor/worker/execution_engine.cc
  3. +2
    -0
      ge/hybrid/model/hybrid_model_builder.cc
  4. +2
    -1
      ge/hybrid/node_executor/hccl/hccl_node_executor.cc
  5. +1
    -0
      ge/ir_build/ge_ir_build.cc
  6. +6
    -6
      ge/single_op/task/op_task.cc

+ 1
- 0
ge/graph/load/model_manager/davinci_model.cc View File

@@ -3683,6 +3683,7 @@ Status DavinciModel::InitConstant(const OpDescPtr &op_desc) {
     elem_num = 1;
   }
   uint64_t *buff = reinterpret_cast<uint64_t *>(tensor->MutableData().data());
+  GE_CHECK_NOTNULL(buff);
   if (ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) != SUCCESS) {
     GELOGE(FAILED, "[Call][CheckInt64Uint32MulOverflow] Shape size:%ld is invalid", elem_num);
     return FAILED;


+ 1
- 1
ge/hybrid/executor/worker/execution_engine.cc View File

@@ -428,7 +428,7 @@ Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const
       continue;
     }

-    int64_t expected_size;
+    int64_t expected_size = 0;
     (void)TensorUtils::GetSize(*tensor_desc, expected_size);
     GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size);
     auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize());


+ 2
- 0
ge/hybrid/model/hybrid_model_builder.cc View File

@@ -900,6 +900,7 @@ Status HybridModelBuilder::LoadGraph() {
     GE_CHECK_NOTNULL(node_item);
     AscendString graph_name;
     GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name");
+    GE_CHECK_NOTNULL(graph_name.GetString());
     auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString());
     GE_CHECK_NOTNULL(subgraph);
     GE_CHK_STATUS_RET(IdentifyVariableOutputs(*node_item, subgraph),
@@ -967,6 +968,7 @@ Status HybridModelBuilder::HandleDtString(const GeTensor &tensor, void *var_addr

   auto &mutable_tensor = const_cast<GeTensor &>(tensor);
   uint64_t *buff = reinterpret_cast<uint64_t *>(mutable_tensor.MutableData().data());
+  GE_CHECK_NOTNULL(buff);
   GE_CHK_BOOL_RET_STATUS(ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) == SUCCESS, FAILED,
                          "[Invoke][CheckInt64Uint32MulOverflow] failed because Shape size is invalid.");
   auto offset = static_cast<uint64_t>(elem_num * kBytes * kStringHeadElems);


+ 2
- 1
ge/hybrid/node_executor/hccl/hccl_node_executor.cc View File

@@ -417,7 +417,7 @@ Status BuildGatherAllToAllParams(TaskContext &context, HcomGatherAllToAllVParams
   }
   params.recvtype = iter->second;

-  int64_t addr_len;
+  int64_t addr_len = 0;
   (void) ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len);
   params.addrLength = static_cast<int>(addr_len);
@@ -460,6 +460,7 @@ Status AllToAllNodeTask::ExecuteAsync(TaskContext &context, std::function<void()
     return FAILED;
   }
   HcomGatherAllToAllVParams params;
+  params.group = nullptr;
   GE_CHK_STATUS_RET(BuildGatherAllToAllParams(context, params));
   HcclResult hccl_ret = HcomExecEnqueueGatherAllToAllV(params, callback);
   if (hccl_ret != HCCL_SUCCESS) {


+ 1
- 0
ge/ir_build/ge_ir_build.cc View File

@@ -866,6 +866,7 @@ graphStatus aclgrphDumpGraph(const ge::Graph &graph, const char *file, const siz
 graphStatus aclgrphGenerateForOp(const AscendString &op_type, const vector<TensorDesc> &inputs,
                                  const vector<TensorDesc> &outputs, Graph &graph) {
   ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
+  GE_CHECK_NOTNULL(op_type.GetString());
   auto op_type_str = std::string(op_type.GetString());
   auto op_name = op_type_str + "_" + std::to_string(ge::GetCurrentTimestamp());
   auto op_desc = ge::MakeShared<ge::OpDesc>(op_name, op_type_str);


+ 6
- 6
ge/single_op/task/op_task.cc View File

@@ -57,7 +57,7 @@ Status OpTask::OpenDump(rtStream_t stream) {
   size_t arg_num = 0;
   GetIoAddr(arg_base, arg_num);
   if (arg_num < input_size + output_size) {
-    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
+    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
           "[Check][Size]io_addrs_for_dump_ size %zu is not equal input and output size %zu",
           arg_num, input_size + output_size);
     REPORT_INNER_ERROR("E19999", "io_addrs_for_dump_ size %zu is not equal input and output size %zu",
@@ -149,7 +149,7 @@ Status OpTask::DoUpdateArgTable(const SingleOpModelParam &param, bool keep_works
   size_t arg_num = 0;
   GetIoAddr(arg_base, arg_num);
   if (arg_num < all_addresses.size()) {
-    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
+    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
           "[Check][Size][%s] arg number mismatches, expect at least = %zu, but got = %zu.",
           op_desc_->GetName().c_str(), all_addresses.size(), arg_num);
     REPORT_INNER_ERROR("E19999", "%s arg number mismatches, expect at least = %zu, but got = %zu.",
@@ -443,7 +443,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
   Status ret = aicpu_ext_handle_->Parse(kernel_ext_info);
   if (ret != SUCCESS) {
     GELOGE(ret, "[Parse][Param:kernel_ext_info] failed, kernel_ext_info_size=%zu.", kernel_ext_info.size());
-    REPORT_INNER_ERROR("E19999",
+    REPORT_INNER_ERROR("E19999",
                        "Parse Param:kernel_ext_info failed, kernel_ext_info_size=%zu.", kernel_ext_info.size());
     return ret;
   }
@@ -512,12 +512,12 @@ Status AiCpuBaseTask::UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc,

   if (unknown_type_ != DEPEND_COMPUTE) {
     for (size_t j = 0; j < num_outputs_; ++j) {
-      GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateOutputShapeAndType(j, output_desc[j]),
+      GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateOutputShapeAndType(j, output_desc[j]),
                        "[Update][OutputShapeAndType] failed, Output:%zu.", j);
       if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
         GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(j, output_desc[j]),
                           "AiCpuTask Update [%zu]th output desc failed.",j);
-      }
+      }
     }
   }
@@ -546,7 +546,7 @@ Status AiCpuBaseTask::UpdateOutputShape(vector<GeTensorDesc> &output_desc) {
     GeShape shape;
     DataType data_type;
     aicpu_ext_handle_->GetOutputShapeAndType(i, shape, data_type);
-    GE_CHK_STATUS_RET(UpdateShapeToOutputDesc(shape, output_desc[i]),
+    GE_CHK_STATUS_RET(UpdateShapeToOutputDesc(shape, output_desc[i]),
                      "[Update][ShapeToOutputDesc] failed, output:%zu.", i);
     if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
       GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(i, output_desc[i]), "[Update][OutputDesc] failed, output:%zu.", i);


Loading…
Cancel
Save