From 2b23e1b5a9662799b62572c9a51c4757b5cc66ba Mon Sep 17 00:00:00 2001
From: zhangzhenghai
Date: Tue, 4 Aug 2020 15:58:11 +0800
Subject: [PATCH] modify host_kernels

---
 src/ge/host_kernels/add_kernel.cc            | 11 ++++----
 src/ge/host_kernels/broadcast_args_kernel.cc |  1 +
 src/ge/host_kernels/concat_offset_kernel.cc  | 11 ++++----
 src/ge/host_kernels/dynamic_stitch_kernel.cc | 10 +++----
 src/ge/host_kernels/empty_kernel.cc          | 11 ++++----
 src/ge/host_kernels/expanddims_kernel.cc     |  2 +-
 src/ge/host_kernels/floordiv_kernel.cc       |  2 +-
 src/ge/host_kernels/floormod_kernel.cc       |  2 +-
 src/ge/host_kernels/gather_v2_kernel.cc      | 28 ++++++++++----------
 src/ge/host_kernels/pack_kernel.cc           |  9 ++++---
 src/ge/host_kernels/permute_kernel.cc        |  4 +--
 src/ge/host_kernels/rank_kernel.cc           |  2 +-
 src/ge/host_kernels/reduce_prod_kernel.cc    | 24 ++++++++---------
 src/ge/host_kernels/reformat_kernel.cc       |  9 ++++---
 src/ge/host_kernels/reshape_kernel.cc        |  2 +-
 src/ge/host_kernels/rsqrt_kernel.cc          |  6 ++---
 src/ge/host_kernels/slice_d_kernel.cc        | 23 +++-------------
 src/ge/host_kernels/slice_d_kernel.h         |  1 -
 src/ge/host_kernels/slice_kernel.cc          |  2 +-
 src/ge/host_kernels/ssd_prior_box_kernel.cc  |  2 +-
 src/ge/host_kernels/strided_slice_kernel.cc  | 39 ++++++++--------------------
 src/ge/host_kernels/strided_slice_kernel.h   |  1 -
 src/ge/host_kernels/sub_kernel.cc            |  2 +-
 src/ge/host_kernels/transdata_kernel.cc      |  2 +-
 src/ge/host_kernels/transpose_kernel.cc      |  4 +--
 25 files changed, 90 insertions(+), 120 deletions(-)

diff --git a/src/ge/host_kernels/add_kernel.cc b/src/ge/host_kernels/add_kernel.cc
index afef1c37..6d6a049c 100644
--- a/src/ge/host_kernels/add_kernel.cc
+++ b/src/ge/host_kernels/add_kernel.cc
@@ -133,24 +133,25 @@ Status AddKernel::BCastAdd(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input) {
   if (op_desc_ptr == nullptr) {
-    GELOGW("Op_desc_ptr must not be null.");
+    GELOGE(PARAM_INVALID, "Op_desc_ptr must not be null.");
     return PARAM_INVALID;
   }
   // check how many inputs
   if ((input.size() != kAddInputSize) || (op_desc_ptr->GetOutputsSize() != kAddOutputSize)) {
-    GELOGW("The number of input for add must be %zu, output number must be %zu.", kAddInputSize, kAddOutputSize);
+    GELOGE(PARAM_INVALID, "The number of input for add must be %zu, output number must be %zu.", kAddInputSize,
+           kAddOutputSize);
     return PARAM_INVALID;
   }
   // input vector elements must not be null
   if ((input[kAddFirstInput] == nullptr) || (input[kAddSecondInput] == nullptr)) {
-    GELOGW("Input vector elements must not be null.");
+    GELOGE(PARAM_INVALID, "Input vector elements must not be null.");
     return PARAM_INVALID;
   }
   // Inputs must have the same datatype.
   DataType data_type_0 = input[kAddFirstInput]->GetTensorDesc().GetDataType();
   DataType data_type_1 = input[kAddSecondInput]->GetTensorDesc().GetDataType();
   if (data_type_0 != data_type_1) {
-    GELOGW("Data type of inputs for add not matched, data_type_0:%s, data_type_1:%s",
+    GELOGE(PARAM_INVALID, "Data type of inputs for add not matched, data_type_0:%s, data_type_1:%s",
            TypeUtils::DataTypeToSerialString(data_type_0).c_str(),
            TypeUtils::DataTypeToSerialString(data_type_1).c_str());
     return PARAM_INVALID;
@@ -191,7 +192,7 @@ Status AddKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
diff --git a/src/ge/host_kernels/broadcast_args_kernel.cc b/src/ge/host_kernels/broadcast_args_kernel.cc
--- a/src/ge/host_kernels/broadcast_args_kernel.cc
+++ b/src/ge/host_kernels/broadcast_args_kernel.cc
@@ ... @@ Status BroadcastArgsKernel::Compute(const OpDescPtr op_desc_ptr, const std::vec
   vector<int64_t> x1_dims;
   vector<int64_t> x2_dims;
   const auto &op_in_desc = op_desc_ptr->MutableInputDesc(0);
   GE_CHECK_NOTNULL(op_in_desc);
+  ;
   DataType data_type = op_in_desc->GetDataType();
   bool result = (OpUtils::GetShapeDataFromConstTensor(input[0], data_type, x1_dims) == SUCCESS) &&
                 (OpUtils::GetShapeDataFromConstTensor(input[1], data_type, x2_dims) == SUCCESS);
diff --git a/src/ge/host_kernels/concat_offset_kernel.cc b/src/ge/host_kernels/concat_offset_kernel.cc
index 0a870949..2e609d68 100644
--- a/src/ge/host_kernels/concat_offset_kernel.cc
+++ b/src/ge/host_kernels/concat_offset_kernel.cc
@@ -41,7 +41,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
   int32_t N = *(const_cast<int32_t *>(reinterpret_cast<const int32_t *>(input_0->GetData().data())));
   // validate inputs
   if (static_cast<int32_t>(input.size()) != (N + kNumOne) || input.size() <= kConcatOffsetInputIndexOne) {
-    GELOGW("The number of input for concat offset must be equal with %d, and must be more than one.", (N + kNumOne));
+    GELOGE(PARAM_INVALID, "The number of input for concat offset must be equal with %d, and must be more than one.",
+           (N + kNumOne));
     return NOT_CHANGED;
   }
@@ -58,7 +59,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
   GeShape output_shape = input.at(kConcatOffsetInputIndexOne)->GetTensorDesc().GetShape();
   int64_t output_size = output_shape.GetShapeSize();
   if (concat_dim >= output_size) {
-    GELOGW("Concat dim is biger than the size of output_shape.");
+    GELOGE(PARAM_INVALID, "Concat dim is biger than the size of output_shape.");
     return NOT_CHANGED;
   }
   GELOGI("Output shape size is %ld", output_size);
@@ -78,7 +79,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("Failed to fold node %s, out of memeory", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memeory", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
@@ -86,7 +87,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
     output_ptr->MutableTensorDesc().SetShape(output_shape);
     GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()),
                                         static_cast<size_t>(sizeof(DT_INT32) * output_size)) != GRAPH_SUCCESS,
-                    GELOGW("set data failed");
+                    GELOGE(INTERNAL_ERROR, "set data failed");
                     return NOT_CHANGED);
     v_output.push_back(output_ptr);
     // caculate offset
diff --git a/src/ge/host_kernels/dynamic_stitch_kernel.cc b/src/ge/host_kernels/dynamic_stitch_kernel.cc
index c1245535..c8a19e44 100644
--- a/src/ge/host_kernels/dynamic_stitch_kernel.cc
+++ b/src/ge/host_kernels/dynamic_stitch_kernel.cc
@@ -63,11 +63,11 @@ Status DynamicStitchKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Co
 Status DynamicStitchKernel::ValidateParams(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input) {
   if (op_desc_ptr == nullptr) {
-    GELOGW("Input op_desc is nullptr.");
+    GELOGE(PARAM_INVALID, "Input op_desc is nullptr.");
     return PARAM_INVALID;
   }
   if (op_desc_ptr->GetOutputsSize() == 0) {
-    GELOGW("Current output_desc is empty.");
+    GELOGE(PARAM_INVALID, "Current output_desc is empty.");
empty."); return PARAM_INVALID; } // validate input @@ -78,7 +78,7 @@ Status DynamicStitchKernel::ValidateParams(const OpDescPtr &op_desc_ptr, const s } for (const auto &in : input) { if (in == nullptr) { - GELOGW("input is nullptr."); + GELOGE(PARAM_INVALID, "input is nullptr."); return PARAM_INVALID; } } @@ -150,7 +150,7 @@ Status DynamicStitchKernel::GenData(const vector &input, GeTen // 2.allocate memery for output std::unique_ptr buf(new (std::nothrow) uint8_t[allowance]); if (buf == nullptr) { - GELOGW("new buffer failed"); + GELOGE(MEMALLOC_FAILED, "new buffer failed"); return INTERNAL_ERROR; } // 3.copy data from input_data along with the sequence of input_indices @@ -164,7 +164,7 @@ Status DynamicStitchKernel::GenData(const vector &input, GeTen output_ptr->MutableTensorDesc().SetShape(merged_shape); Status ret = output_ptr->SetData(buf.get(), allowance); if (ret != GRAPH_SUCCESS) { - GELOGW("set data failed"); + GELOGE(INTERNAL_ERROR, "set data failed"); return NOT_CHANGED; } return SUCCESS; diff --git a/src/ge/host_kernels/empty_kernel.cc b/src/ge/host_kernels/empty_kernel.cc index a5e5fbcf..856caf50 100644 --- a/src/ge/host_kernels/empty_kernel.cc +++ b/src/ge/host_kernels/empty_kernel.cc @@ -38,7 +38,7 @@ const size_t kShapeMaxDims = 1; } // namespace Status EmptyKernel::EmptyCheck(const OpDescPtr &op_desc_ptr, const std::vector &input) { if (op_desc_ptr == nullptr) { - GELOGW("Parameter's invalid, Input opDescPtr is nullptr."); + GELOGE(PARAM_INVALID, "Parameter's invalid, Input opDescPtr is nullptr."); return PARAM_INVALID; } // check input size @@ -46,19 +46,20 @@ Status EmptyKernel::EmptyCheck(const OpDescPtr &op_desc_ptr, const std::vectorGetAllInputsDesc().size() != kEmptyInputsSize) || (input.size() != kEmptyInputsSize) || (op_desc_ptr->GetAllOutputsDesc().size() != kEmptyOutputsSize)); if (size_check) { - GELOGW("Input/Output size error. InDesc size:%zu, OutDesc size:%zu, in size:%zu ", + GELOGE(PARAM_INVALID, "Input/Output size error. 
           op_desc_ptr->GetAllInputsDesc().size(), op_desc_ptr->GetAllOutputsDesc().size(), input.size());
     return PARAM_INVALID;
   }
   if (input.at(kEmptyFirstInput) == nullptr) {
-    GELOGW("Parameter's invalid, first input is nullptr.");
+    GELOGE(PARAM_INVALID, "Parameter's invalid, first input is nullptr.");
     return PARAM_INVALID;
   }
   ConstGeTensorPtr shape = input.at(kEmptyFirstInput);
   // Check if the dimension is 1-D
   if (shape->GetTensorDesc().GetShape().GetDimNum() > kShapeMaxDims) {
-    GELOGW("Check if the dimension is 1-D failed, dims:%zu", shape->GetTensorDesc().GetShape().GetDimNum());
+    GELOGE(PARAM_INVALID, "Check if the dimension is 1-D failed, dims:%zu",
+           shape->GetTensorDesc().GetShape().GetDimNum());
     return PARAM_INVALID;
   }
   return SUCCESS;
@@ -83,7 +84,7 @@ Status EmptyKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
   } else {
-    GELOGW("shape type must be DT_INT32 or DT_INT64.");
+    GELOGE(PARAM_INVALID, "shape type must be DT_INT32 or DT_INT64.");
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/expanddims_kernel.cc b/src/ge/host_kernels/expanddims_kernel.cc
index 15648573..1d17ad48 100644
--- a/src/ge/host_kernels/expanddims_kernel.cc
+++ b/src/ge/host_kernels/expanddims_kernel.cc
@@ -66,7 +66,7 @@ Status ExpanddimsKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kExpandDimsIndexZero);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/floordiv_kernel.cc b/src/ge/host_kernels/floordiv_kernel.cc
index 05eded80..4175df92 100644
--- a/src/ge/host_kernels/floordiv_kernel.cc
+++ b/src/ge/host_kernels/floordiv_kernel.cc
@@ -260,7 +260,7 @@ Status FloorDivKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/floormod_kernel.cc b/src/ge/host_kernels/floormod_kernel.cc
index 7ad746de..a8c16c9d 100644
--- a/src/ge/host_kernels/floormod_kernel.cc
+++ b/src/ge/host_kernels/floormod_kernel.cc
@@ -122,7 +122,7 @@ Status FloorModKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
   GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(kFloorModFirstOutput));
   if (output_ptr == nullptr) {
-    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/gather_v2_kernel.cc b/src/ge/host_kernels/gather_v2_kernel.cc
index 7413395a..c8cc3006 100644
--- a/src/ge/host_kernels/gather_v2_kernel.cc
+++ b/src/ge/host_kernels/gather_v2_kernel.cc
@@ -274,7 +274,7 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
   auto indices_ptr = const_cast<int32_t *>(reinterpret_cast<const int32_t *>(indices_tensor_ptr->GetData().data()));
   for (int64_t i = 0; i < indices_shape.GetShapeSize(); i++) {
     if (*(indices_ptr + i) < 0 || *(indices_ptr + i) >= x_shape.GetDim(axis)) {
-      GELOGW("indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
+      GELOGE(NOT_CHANGED, "indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
       return NOT_CHANGED;
     }
     indicates_.push_back(*(indices_ptr + i));
@@ -284,7 +284,7 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
   auto indices_ptr = const_cast<int64_t *>(reinterpret_cast<const int64_t *>(indices_tensor_ptr->GetData().data()));
   for (int64_t i = 0; i < indices_shape.GetShapeSize(); i++) {
     if (*(indices_ptr + i) < 0 || *(indices_ptr + i) >= x_shape.GetDim(axis)) {
-      GELOGW("indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
+      GELOGE(NOT_CHANGED, "indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
       return NOT_CHANGED;
     }
     indicates_.push_back(*(indices_ptr + i));
@@ -296,19 +296,19 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
 Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeTensorPtr> &input,
                              vector<GeTensorPtr> &v_output) const {
   if (op_desc_ptr == nullptr) {
-    GELOGW("input opdesc is nullptr.");
+    GELOGE(NOT_CHANGED, "input opdesc is nullptr.");
     return NOT_CHANGED;
   }
   if (input.size() != kGatherV2InpotNum) {
-    GELOGW("The number of input for GatherV2 must be %zu.", kGatherV2InpotNum);
+    GELOGE(NOT_CHANGED, "The number of input for GatherV2 must be %zu.", kGatherV2InpotNum);
     return NOT_CHANGED;
   }
   bool is_null = (input[kGatherV2InputIndexZero] == nullptr || input[kGatherV2InputIndexOne] == nullptr ||
                   input[kGatherV2InputIndexTwo] == nullptr);
   if (is_null) {
-    GELOGW("some input is nullptr.");
+    GELOGE(NOT_CHANGED, "some input is nullptr.");
     return NOT_CHANGED;
   }
   ConstGeTensorPtr tensor0 = input.at(kGatherV2InputIndexZero);
@@ -318,7 +318,7 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
   bool size_is_zero = ((tensor0->GetData().size() == 0) || (tensor1->GetData().size() == 0) ||
                        (tensor2->GetData().size() == 0));
   if (size_is_zero) {
-    GELOGW("some input size is zero.");
+    GELOGE(NOT_CHANGED, "some input size is zero.");
     return NOT_CHANGED;
   }
@@ -326,13 +326,13 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
   auto axis_shape = tensor2->GetTensorDesc().GetShape();
   // axis must be scalar
   if (axis_shape.GetDimNum() != 0) {
-    GELOGW("axis must be scalar but its shape is %zu", axis_shape.GetDimNum());
+    GELOGE(NOT_CHANGED, "axis must be scalar but its shape is %zu", axis_shape.GetDimNum());
     return NOT_CHANGED;
   }
   auto axis_data_type = tensor2->GetTensorDesc().GetDataType();
   bool is_valid_axis_data_type = axis_data_type == DT_INT32 || axis_data_type == DT_INT64;
   if (!is_valid_axis_data_type) {
-    GELOGW("axis datatype must be DT_INT32 or DT_INT64");
+    GELOGE(NOT_CHANGED, "axis datatype must be DT_INT32 or DT_INT64");
     return NOT_CHANGED;
   }
@@ -340,11 +340,11 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
   auto indices_data_type = tensor1->GetTensorDesc().GetDataType();
   bool is_valid_indices_data_type = indices_data_type == DT_INT32 || indices_data_type == DT_INT64;
   if (!is_valid_indices_data_type) {
-    GELOGW("indices datatype must be DT_INT32 or DT_INT64");
+    GELOGE(NOT_CHANGED, "indices datatype must be DT_INT32 or DT_INT64");
     return NOT_CHANGED;
   }
   if (indices_shape.GetDimNum() > kMaxIndicatesDims) {
-    GELOGW("indices input only support 0 or 1 dims");
+    GELOGE(NOT_CHANGED, "indices input only support 0 or 1 dims");
     return NOT_CHANGED;
   }
   return SUCCESS;
@@ -372,7 +372,7 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
@@ -390,13 +390,13 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
   axis = axis >= 0 ? axis : axis + x_shape.GetDimNum();
   // check axis value
   if (axis < 0 || (axis + 1) > static_cast<int64_t>(x_shape.GetDimNum())) {
-    GELOGW("axis is invalid");
+    GELOGE(NOT_CHANGED, "axis is invalid");
     return NOT_CHANGED;
   }
   auto indices_data_type = tensor1->GetTensorDesc().GetDataType();
   ret = SaveIndicesByDataType(tensor1, x_shape, indices_shape, indices_data_type, static_cast<int64_t>(axis));
   if (ret != SUCCESS) {
-    GELOGW("Save indeices by data type failed!");
+    GELOGE(NOT_CHANGED, "Save indeices by data type failed!");
     return ret;
   }
@@ -420,7 +420,7 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
   GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
   if (output_ptr == nullptr) {
-    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
   output_ptr->MutableTensorDesc().SetShape(GeShape(y_shape));
diff --git a/src/ge/host_kernels/pack_kernel.cc b/src/ge/host_kernels/pack_kernel.cc
index 9b62a582..f3f64a6c 100644
--- a/src/ge/host_kernels/pack_kernel.cc
+++ b/src/ge/host_kernels/pack_kernel.cc
@@ -63,7 +63,7 @@ Status PackKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<Co
 Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
                                         const std::vector<ConstGeTensorPtr> &input) {
   if (op_desc_ptr == nullptr) {
-    GELOGW("input opdesc is nullptr.");
+    GELOGE(PARAM_INVALID, "input opdesc is nullptr.");
     return PARAM_INVALID;
   }
   if (!(AttrUtils::GetInt(op_desc_ptr, PACK_ATTR_NAME_NUM, n_))) {
@@ -71,15 +71,16 @@ Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
     GELOGD("Attr %s is not set, default value %ld is used.", PACK_ATTR_NAME_NUM.c_str(), n_);
   }
   if (!(AttrUtils::GetInt(op_desc_ptr, ATTR_NAME_AXIS, axis_))) {
-    GELOGW("Attr %s is not exist.", ATTR_NAME_AXIS.c_str());
+    GELOGE(PARAM_INVALID, "Attr %s is not exist.", ATTR_NAME_AXIS.c_str());
     return PARAM_INVALID;
   }
   if (input.empty()) {
-    GELOGW("The number of input for Pack should be %ld, in fact it is %zu ", n_, input.size());
+    GELOGE(PARAM_INVALID, "The number of input for Pack should be %ld, in fact it is %zu ", n_, input.size());
     return NOT_CHANGED;
   }
   if (input.size() != static_cast<size_t>(n_)) {
-    GELOGW("The number of input for Pack should be %d, in fact it is %ld ", static_cast<int32_t>(n_), input.size());
+    GELOGE(PARAM_INVALID, "The number of input for Pack should be %d, in fact it is %ld ", static_cast<int32_t>(n_),
+           input.size());
     return PARAM_INVALID;
   }
   data_type_ = op_desc_ptr->GetInputDesc(0).GetDataType();
diff --git a/src/ge/host_kernels/permute_kernel.cc b/src/ge/host_kernels/permute_kernel.cc
index 24bed54d..8263d19f 100644
--- a/src/ge/host_kernels/permute_kernel.cc
+++ b/src/ge/host_kernels/permute_kernel.cc
@@ -110,14 +110,14 @@ Status PermuteKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Con
   const uint8_t *src_data = const_weight_ptr->GetData().data();
   formats::TransResult trans_result;
   auto ret = formats::TransposeWithShapeCheck(src_data, src_shape, data_shape, src_data_type, perm_list, trans_result);
   if (ret != SUCCESS) {
-    GELOGW("Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
+    GELOGE(INTERNAL_ERROR, "Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
           TypeUtils::FormatToSerialString(src_format).c_str(), TypeUtils::FormatToSerialString(data_format).c_str(),
           formats::ShapeToString(src_shape).c_str(), formats::ShapeToString(data_shape).c_str(),
           formats::ShapeToString(perm_list).c_str(), TypeUtils::DataTypeToSerialString(src_data_type).c_str());
diff --git a/src/ge/host_kernels/rank_kernel.cc b/src/ge/host_kernels/rank_kernel.cc
index c8763aef..faaf16b8 100644
--- a/src/ge/host_kernels/rank_kernel.cc
+++ b/src/ge/host_kernels/rank_kernel.cc
@@ -49,7 +49,7 @@ Status RankKernel::Compute(const NodePtr &node, std::vector<GeTensorPtr> &v_outp
   auto ndims = input_shape->GetShape().GetDimNum();
   GeTensorDesc tensor_desc(op_desc->GetOutputDesc(0));
   GeTensorPtr output_ptr;
-  output_ptr = MakeShared<GeTensor>(tensor_desc, reinterpret_cast<uint8_t *>(&ndims), GetSizeByDataType(DT_INT32));
+  output_ptr = MakeShared<GeTensor>(tensor_desc, reinterpret_cast<uint8_t *>(&ndims), sizeof(ndims));
   if (output_ptr == nullptr) {
     GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed");
     return MEMALLOC_FAILED;
diff --git a/src/ge/host_kernels/reduce_prod_kernel.cc b/src/ge/host_kernels/reduce_prod_kernel.cc
index 739d4b9f..479b50ab 100644
--- a/src/ge/host_kernels/reduce_prod_kernel.cc
+++ b/src/ge/host_kernels/reduce_prod_kernel.cc
@@ -51,7 +51,7 @@ Status ReduceProdKernel::ReduceProdCheck(const ge::OpDescPtr &op_desc_ptr,
            op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
-  GELOGW("Unexpected ReduceProd node, node input size: %zu, node name: %s", input.size(),
+  GELOGE(PARAM_INVALID, "Unexpected ReduceProd node, node input size: %zu, node name: %s", input.size(),
         op_desc_ptr->GetName().c_str());
   return PARAM_INVALID;
 }
@@ -60,13 +60,13 @@ Status ReduceProdKernel::ReduceProdCheck(const ge::OpDescPtr &op_desc_ptr,
   GE_CHECK_NOTNULL(data_tensor);
   GE_CHECK_NOTNULL(axis_tensor);
   if (axis_tensor->GetTensorDesc().GetShape().GetDimNum() > kReduceProdMaxAxisRank) {
-    GELOGW("Axis must be at most rank 1, node node: %s", op_desc_ptr->GetName().c_str());
+    GELOGE(PARAM_INVALID, "Axis must be at most rank 1, node node: %s", op_desc_ptr->GetName().c_str());
     return PARAM_INVALID;
   }
   DataType data_type = data_tensor->GetTensorDesc().GetDataType();
   if (kReduceProdSupportedType.find(data_type) == kReduceProdSupportedType.end()) {
-    GELOGW("ReduceProdKernel data type %s not support, node name: %s",
+    GELOGE(PARAM_INVALID, "ReduceProdKernel data type %s not support, node name: %s",
           TypeUtils::DataTypeToSerialString(data_type).c_str(), op_desc_ptr->GetName().c_str());
     return PARAM_INVALID;
   }
@@ -83,7 +83,7 @@ Status ReduceProdKernel::AxisCal(const std::vector<ConstGeTensorPtr> &input) {
   int32_t *axis = const_cast<int32_t *>(reinterpret_cast<const int32_t *>(axis_tensor->GetData().GetData()));
   GE_CHECK_NOTNULL(axis);
   if (static_cast<size_t>(*axis) >= data_dim_size) {
-    GELOGW("axis is out of rank of data_dims, axis is %d.", *axis);
+    GELOGE(PARAM_INVALID, "axis is out of rank of data_dims, axis is %d.", *axis);
     return PARAM_INVALID;
   }
   axis_dim_ = data_dims[static_cast<size_t>(*axis)];
@@ -98,13 +98,13 @@ Status ReduceProdKernel::AxisCal(const std::vector<ConstGeTensorPtr> &input) {
   // data_dims is the vector of dims, element in data_dims isn't negative.
   if (axis_appear) {
     if (data_dims[i] != 0 && end_dim_ > (INT64_MAX / data_dims[i])) {
-      GELOGW("Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", end_dim_, data_dims[i]);
+      GELOGE(INTERNAL_ERROR, "Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", end_dim_, data_dims[i]);
       return INTERNAL_ERROR;
     }
     end_dim_ *= data_dims[i];
   } else {
     if (data_dims[i] != 0 && head_dim_ > (INT64_MAX / data_dims[i])) {
-      GELOGW("Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", head_dim_, data_dims[i]);
+      GELOGE(INTERNAL_ERROR, "Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", head_dim_, data_dims[i]);
       return INTERNAL_ERROR;
     }
     head_dim_ *= data_dims[i];
@@ -122,7 +122,7 @@ Status ReduceProdKernel::DataCal(const std::vector<ConstGeTensorPtr> &input,
   size_t data_num = data_tensor->GetData().size() / sizeof(int32_t);
   unique_ptr<int32_t[]> buf(new (std::nothrow) int32_t[data_num]());
   if (buf == nullptr) {
-    GELOGW("new buf failed");
+    GELOGE(MEMALLOC_FAILED, "new buf failed");
     return INTERNAL_ERROR;
   }
@@ -190,12 +190,12 @@ Status ReduceProdKernel::ComputeNoAxis(const ge::OpDescPtr &op_desc_ptr, const s
   ConstGeTensorPtr data_tensor = input.at(kReduceProdDataIndex);
   GE_CHECK_NOTNULL(data_tensor);
   if (data_tensor->GetData().size() == 0) {
-    GELOGW("ReduceProdKernel data size of inputs is 0, node node: %s", op_desc_ptr->GetName().c_str());
+    GELOGE(PARAM_INVALID, "ReduceProdKernel data size of inputs is 0, node node: %s", op_desc_ptr->GetName().c_str());
     return PARAM_INVALID;
   }
   DataType data_type = data_tensor->GetTensorDesc().GetDataType();
   if (kReduceProdSupportedType.find(data_type) == kReduceProdSupportedType.end()) {
-    GELOGW("ReduceProdKernel data type %s not support, node name: %s",
+    GELOGE(PARAM_INVALID, "ReduceProdKernel data type %s not support, node name: %s",
           TypeUtils::DataTypeToSerialString(data_type).c_str(), op_desc_ptr->GetName().c_str());
     return PARAM_INVALID;
   }
@@ -206,7 +206,7 @@ Status ReduceProdKernel::ComputeNoAxis(const ge::OpDescPtr &op_desc_ptr, const s
   size_t data_num = data_tensor->GetData().size() / sizeof(int32_t);
   unique_ptr<int32_t[]> buf(new (std::nothrow) int32_t[data_num]());
   if (buf == nullptr) {
-    GELOGW("new buf failed");
+    GELOGE(MEMALLOC_FAILED, "new buf failed");
     return INTERNAL_ERROR;
   }
@@ -235,7 +235,7 @@ Status ReduceProdKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
   GELOGI("ReduceProdKernel in.");
   Status ret = ReduceProdCheck(op_desc_ptr, input);
   if (ret != SUCCESS && ret != NOT_CHANGED) {
-    GELOGW("ReduceProdKernel input is invalid, failed to fold node.");
+    GELOGE(PARAM_INVALID, "ReduceProdKernel input is invalid, failed to fold node.");
     return NOT_CHANGED;
   }
@@ -243,7 +243,7 @@ Status ReduceProdKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/reformat_kernel.cc b/src/ge/host_kernels/reformat_kernel.cc
index c2dd1e17..33a13599 100644
--- a/src/ge/host_kernels/reformat_kernel.cc
+++ b/src/ge/host_kernels/reformat_kernel.cc
@@ -56,7 +56,7 @@ Status ReFormatKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
           formats::ShapeToString(input.at(0)->GetTensorDesc().GetShape()).c_str());
     return NOT_CHANGED;
   }
   GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(kReformatFirstOutput));
   if (output_ptr == nullptr) {
-    GELOGW("Create shared ptr for GeTensor failed");
+    GELOGE(INTERNAL_ERROR, "Create shared ptr for GeTensor failed");
     return NOT_CHANGED;
   }
-  GE_IF_BOOL_EXEC(output_ptr->SetData(input.at(0)->GetData()) != GRAPH_SUCCESS, GELOGW("set data failed");
+  GE_IF_BOOL_EXEC(output_ptr->SetData(input.at(0)->GetData()) != GRAPH_SUCCESS,
+                  GELOGE(INTERNAL_ERROR, "set data failed");
                   return NOT_CHANGED);
   v_output.emplace_back(output_ptr);
   GELOGD("ReFormatKernel success.");
diff --git a/src/ge/host_kernels/reshape_kernel.cc b/src/ge/host_kernels/reshape_kernel.cc
index dc7e4bb8..906624d2 100644
--- a/src/ge/host_kernels/reshape_kernel.cc
+++ b/src/ge/host_kernels/reshape_kernel.cc
@@ -67,7 +67,7 @@ Status ReshapeKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kOutputDescFirstIndex);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/rsqrt_kernel.cc b/src/ge/host_kernels/rsqrt_kernel.cc
index 56972d23..3e14fd5f 100644
--- a/src/ge/host_kernels/rsqrt_kernel.cc
+++ b/src/ge/host_kernels/rsqrt_kernel.cc
@@ -64,7 +64,7 @@ Status RsqrtKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
   if (data_count > 0) {
     unique_ptr<float[]> buf(new (std::nothrow) float[data_count]());
     if (buf == nullptr) {
-      GELOGW("new buf failed");
+      GELOGE(MEMALLOC_FAILED, "new buf failed");
       return NOT_CHANGED;
     }
@@ -81,13 +81,13 @@ Status RsqrtKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
     auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
     GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
     if (output_ptr == nullptr) {
-      GELOGW("MakeShared GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+      GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
       return NOT_CHANGED;
     }
     output_ptr->MutableTensorDesc().SetDataType(DT_FLOAT);
     GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()), data_size) != GRAPH_SUCCESS,
-                    GELOGW("set data failed");
+                    GELOGE(INTERNAL_ERROR, "set data failed");
                     return NOT_CHANGED);
     output_ptr->MutableTensorDesc().SetShape(x_shape);
     v_output.push_back(output_ptr);
diff --git a/src/ge/host_kernels/slice_d_kernel.cc b/src/ge/host_kernels/slice_d_kernel.cc
index 3b8fd0a0..ad0a1675 100644
--- a/src/ge/host_kernels/slice_d_kernel.cc
+++ b/src/ge/host_kernels/slice_d_kernel.cc
@@ -129,7 +129,7 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
@@ -143,14 +143,8 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
   void *data = reinterpret_cast<void *>(const_cast<uint8_t *>(x_tensor->GetData().data()));
   int64_t x_data_size = x_tensor->GetTensorDesc().GetShape().GetShapeSize();
-
-  Status ret = CheckOutputDims(size_list, op_desc_ptr);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-
-  ret = OpUtils::SetOutputSliceData(data, x_data_size, x_data_type, x_dims, begin_list, size_list, output_ptr.get(),
-                                    stride_list);
+  Status ret = OpUtils::SetOutputSliceData(data, x_data_size, x_data_type, x_dims, begin_list, size_list,
+                                           output_ptr.get(), stride_list);
   if (ret != SUCCESS) {
     GELOGW("Set output data of SliceD failed.");
     return NOT_CHANGED;
@@ -161,16 +155,5 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
   return SUCCESS;
 }
 
-Status SliceDKernel::CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr) {
-  // check dim not all less than 0
-  for (auto dim : output_dims) {
-    if (dim > 0) {
-      return SUCCESS;
-    }
-  }
-  GELOGW("all output dim <=0, can't be processed. op_name : %s", attr->GetName().c_str());
op_name : %s", attr->GetName().c_str()); - return NOT_CHANGED; -} - REGISTER_KERNEL(SLICED, SliceDKernel); } // namespace ge diff --git a/src/ge/host_kernels/slice_d_kernel.h b/src/ge/host_kernels/slice_d_kernel.h index 90ef9b8b..9fe35352 100644 --- a/src/ge/host_kernels/slice_d_kernel.h +++ b/src/ge/host_kernels/slice_d_kernel.h @@ -29,7 +29,6 @@ class SliceDKernel : public Kernel { private: Status SliceDCheck(const OpDescPtr &op_desc_ptr, const std::vector &input, std::vector &begin_list, std::vector &size_list); - Status CheckOutputDims(const std::vector &output_dims, const OpDescPtr attr); }; } // namespace ge diff --git a/src/ge/host_kernels/slice_kernel.cc b/src/ge/host_kernels/slice_kernel.cc index 5f72fc49..1d7d90c2 100644 --- a/src/ge/host_kernels/slice_kernel.cc +++ b/src/ge/host_kernels/slice_kernel.cc @@ -21,8 +21,8 @@ #include "common/types.h" #include "common/util.h" #include "framework/common/debug/ge_log.h" -#include "graph/utils/type_utils.h" #include "host_kernels/kernel_utils.h" +#include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" namespace ge { diff --git a/src/ge/host_kernels/ssd_prior_box_kernel.cc b/src/ge/host_kernels/ssd_prior_box_kernel.cc index 9de5a08d..c874d732 100644 --- a/src/ge/host_kernels/ssd_prior_box_kernel.cc +++ b/src/ge/host_kernels/ssd_prior_box_kernel.cc @@ -365,7 +365,7 @@ Status SsdPriorboxKernel::Compute(const NodePtr &node, std::vector // make TensorDesc GeTensorPtr output_ptr = MakeShared(output_tensor_desc); if (output_ptr == nullptr) { - GELOGW("Create shared ptr for GeTensor failed"); + GELOGE(INTERNAL_ERROR, "Create shared ptr for GeTensor failed"); return NOT_CHANGED; } GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast(output_data.get()), diff --git a/src/ge/host_kernels/strided_slice_kernel.cc b/src/ge/host_kernels/strided_slice_kernel.cc index 6a9a558c..0d70a36a 100644 --- a/src/ge/host_kernels/strided_slice_kernel.cc +++ b/src/ge/host_kernels/strided_slice_kernel.cc @@ -46,31 +46,31 @@ Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr, const std::vec int64_t shrink_axis_mask = 0; if (attr == nullptr) { - GELOGW("input opdescptr is nullptr."); + GELOGE(PARAM_INVALID, "input opdescptr is nullptr."); return PARAM_INVALID; } if (input.size() != kStridedSliceInputSize) { - GELOGW("The number of input for strided slice must be %zu.", kStridedSliceInputSize); + GELOGE(PARAM_INVALID, "The number of input for strided slice must be %zu.", kStridedSliceInputSize); return PARAM_INVALID; } if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_BEGIN_MASK, begin_mask)) { - GELOGW("get begin_mask attr failed."); + GELOGE(PARAM_INVALID, "get begin_mask attr failed."); return PARAM_INVALID; } if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_END_MASK, end_mask)) { - GELOGW("get end_mask attr failed."); + GELOGE(PARAM_INVALID, "get end_mask attr failed."); return PARAM_INVALID; } if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_ELLIPSIS_MASK, ellipsis_mask)) { - GELOGW("get ellipsis_mask attr failed."); + GELOGE(PARAM_INVALID, "get ellipsis_mask attr failed."); return PARAM_INVALID; } if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_NEW_AXIS_MASK, new_axis_mask)) { - GELOGW("get new_axis_mask attr failed."); + GELOGE(PARAM_INVALID, "get new_axis_mask attr failed."); return PARAM_INVALID; } if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_SHRINK_AXIS_MASK, shrink_axis_mask)) { - GELOGW("get shrink_axis_mask attr failed."); + GELOGE(PARAM_INVALID, "get shrink_axis_mask attr failed."); return PARAM_INVALID; } if ((ellipsis_mask != 0) || 
@@ -98,7 +98,7 @@ Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr, const std::vec
   ConstGeTensorPtr weight2 = input[kStridedSliceInputIndex2];
   ConstGeTensorPtr weight3 = input[kStridedSliceInputIndex3];
   if (CheckWeight(weight0, weight1, weight2, weight3) != SUCCESS) {
-    GELOGW("Check And Get Attr failed.");
+    GELOGE(PARAM_INVALID, "Check And Get Attr failed.");
     return PARAM_INVALID;
   }
@@ -168,17 +168,6 @@ void StridedSliceKernel::GetOutputDims(uint32_t dims_size, const std::vector<int
-Status StridedSliceKernel::CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr) {
-  // check dim not all less than 0
-  for (auto dim : output_dims) {
-    if (dim > 0) {
-      return SUCCESS;
-    }
-  }
-  GELOGW("all output dim <=0, can't be processed. op_name : %s", attr->GetName().c_str());
-  return NOT_CHANGED;
-}
-
 Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<ConstGeTensorPtr> &input,
                                    vector<GeTensorPtr> &v_output) {
   GELOGI("StridedSliceKernel in.");
@@ -202,7 +191,7 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<C
   const int32_t *end = reinterpret_cast<const int32_t *>(weight2->GetData().data());
   const int32_t *stride = reinterpret_cast<const int32_t *>(weight3->GetData().data());
   if ((begin == nullptr) || (end == nullptr) || (stride == nullptr)) {
-    GELOGW("input weight tensor is nullptr.");
+    GELOGE(PARAM_INVALID, "input weight tensor is nullptr.");
     return NOT_CHANGED;
   }
@@ -248,22 +237,16 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<C
   auto output_tensor_desc = attr->GetOutputDesc(0);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("MakeShared GeTensor failed, node name %s.", attr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s.", attr->GetName().c_str());
     return NOT_CHANGED;
   }
   void *data = reinterpret_cast<void *>(const_cast<uint8_t *>(weight0->GetData().data()));
   GE_CHECK_NOTNULL(data);
-
-  ret = CheckOutputDims(output_dims, attr);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-
   ret = OpUtils::SetOutputSliceData(data, static_cast<int64_t>(data_size), args.data_type, input_dims, begin_vec,
                                     output_dims, output_ptr.get(), stride_vec);
   if (ret != SUCCESS) {
-    GELOGW("SetOutputSliceData failed.");
+    GELOGE(INTERNAL_ERROR, "SetOutputSliceData failed.");
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/strided_slice_kernel.h b/src/ge/host_kernels/strided_slice_kernel.h
index 0ba3afbd..e569b2d0 100644
--- a/src/ge/host_kernels/strided_slice_kernel.h
+++ b/src/ge/host_kernels/strided_slice_kernel.h
@@ -44,7 +44,6 @@ class StridedSliceKernel : public Kernel {
                     int32_t &end_i, int32_t &dim_i) const;
   void GetOutputDims(uint32_t dims_size, const std::vector<int64_t> &output_dims, const Attr &args,
                      vector<int64_t> &v_dims);
-  Status CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr);
 };
 }  // namespace ge
 #endif  // GE_GRAPH_PASSES_FOLDING_KERNEL_STRIDED_SLICE_KERNEL_H_
diff --git a/src/ge/host_kernels/sub_kernel.cc b/src/ge/host_kernels/sub_kernel.cc
index 70a14c9f..ed1e5808 100644
--- a/src/ge/host_kernels/sub_kernel.cc
+++ b/src/ge/host_kernels/sub_kernel.cc
@@ -162,7 +162,7 @@ Status SubKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<Con
   auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kSubFirstOutput);
   GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
   if (output_ptr == nullptr) {
-    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
+    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
     return NOT_CHANGED;
   }
diff --git a/src/ge/host_kernels/transdata_kernel.cc b/src/ge/host_kernels/transdata_kernel.cc
index c5c9da6e..5fe44fe4 100644
--- a/src/ge/host_kernels/transdata_kernel.cc
+++ b/src/ge/host_kernels/transdata_kernel.cc
@@ -113,7 +113,7 @@ Status TransdataKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<C
   const uint8_t *src_data = const_weight_ptr->GetData().data();
   formats::TransResult trans_result;
   auto ret = formats::TransposeWithShapeCheck(src_data, src_shape, data_shape, src_data_type, perm_list, trans_result);
   if (ret != SUCCESS) {
-    GELOGW("Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
+    GELOGE(INTERNAL_ERROR, "Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
           TypeUtils::FormatToSerialString(src_format).c_str(), TypeUtils::FormatToSerialString(data_format).c_str(),
           formats::ShapeToString(src_shape).c_str(), formats::ShapeToString(data_shape).c_str(),
           formats::ShapeToString(perm_list).c_str(), TypeUtils::DataTypeToSerialString(src_data_type).c_str());