modified: gather_v2_kernel.cc
modified: strided_slice_kernel.cc
tags/v1.2.0
@@ -33,7 +33,7 @@ const int kNumOne = 1;
} // namespace
Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGeTensorPtr> &input,
vector<GeTensorPtr> &v_output) {
- GELOGI("ConcatOffsetKernel in.");
+ GELOGD("ConcatOffsetKernel in");
if (op_desc_ptr == nullptr) {
GELOGE(PARAM_INVALID, "input opdesc is nullptr.");
return PARAM_INVALID;
@@ -41,7 +41,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
// validate attrs
int N = 0;
if (!(AttrUtils::GetInt(op_desc_ptr, "N", N))) {
- GELOGW("Attr %s does not exist.", "N");
+ GELOGW("Attr %s does not exist", "N");
return NOT_CHANGED;
}
// follow IR def, the first input is concat_dim
@@ -50,7 +50,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
int32_t concat_dim = *(const_cast<int32_t *>(reinterpret_cast<const int32_t *>(input_0->GetData().data())));
// validate inputs
if ((static_cast<int>(input.size()) != (N + kNumOne)) || (input.size() <= kConcatOffsetInputIndexOne)) {
- GELOGW("The number of input for concat offset must be equal to %d, and must be more than one.", (N + kNumOne));
+ GELOGW("The number of input for concat offset must be equal to %d, and must be more than one", (N + kNumOne));
return NOT_CHANGED;
}
@@ -61,7 +61,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
GELOGW("Concat dim is bigger than the size of output_shape.");
return NOT_CHANGED;
}
- GELOGI("Output shape size is %ld", output_size);
+ GELOGI("Output shape size is %ld.", output_size);
int32_t offset = 0;
if (output_size < 0) {
GELOGE(FAILED, "Index is negative.");
@@ -86,7 +86,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
output_ptr->MutableTensorDesc().SetShape(output_shape);
GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()),
static_cast<size_t>(sizeof(DT_INT32) * output_size)) != GRAPH_SUCCESS,
- GELOGW("set data failed");
+ GELOGW("set data failed.");
return NOT_CHANGED);
v_output.push_back(output_ptr);
// caculate offset
@@ -99,7 +99,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
}
offset += input_dim;
}
- GELOGI("ConcatOffsetKernel success.");
+ GELOGD("ConcatOffsetKernel success");
return SUCCESS;
}
REGISTER_KERNEL(CONCATOFFSET, ConcatOffsetKernel);
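For context on the `offset += input_dim` accumulation touched by the hunks above: ConcatOffset emits, for each of the N inputs, an offset vector that is zero everywhere except at concat_dim, where it carries the running sum of the previous inputs' extents along that axis. A minimal sketch of that bookkeeping, assuming the caller has already validated concat_dim against the shapes as the kernel does (the helper name `ConcatOffsets` is illustrative, not GE API):

```cpp
#include <cstdint>
#include <vector>

// Illustrative only: for each input shape, produce an all-zero offset vector whose
// concat_dim entry is the running total of the previous inputs' dims on that axis.
std::vector<std::vector<int32_t>> ConcatOffsets(const std::vector<std::vector<int32_t>> &shapes,
                                                size_t concat_dim) {
  std::vector<std::vector<int32_t>> offsets;
  int32_t offset = 0;
  for (const auto &shape : shapes) {
    std::vector<int32_t> cur(shape.size(), 0);
    cur[concat_dim] = offset;     // only the concat axis carries a non-zero offset
    offsets.push_back(cur);
    offset += shape[concat_dim];  // mirrors "offset += input_dim" in the hunk above
  }
  return offsets;
}
```

For example, shapes {2,3,4} and {2,5,4} concatenated on dim 1 yield offsets {0,0,0} and {0,3,0}.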
@@ -208,7 +208,7 @@ Status GatherV2Kernel::GenData(const int64_t data_num, ConstGeTensorPtr tensor_x
ret = ProcessAxis3<T>(tensor_x, output);
break;
default:
- GELOGI("Only support 4 dims and below but input axis is %ld.", axis);
+ GELOGI("Only support 4 dims and below but input axis is %ld", axis);
return NOT_CHANGED;
}
return ret;
@@ -267,7 +267,7 @@ Status GatherV2Kernel::Process(int64_t axis, DataType data_type, ConstGeTensorPt
ret = GenData<uint64_t>(data_num, input_tensor_ptr, axis, output_ptr);
break;
default:
- GELOGI("GatherV2Kernel does not support this Data type:%s.", TypeUtils::DataTypeToSerialString(data_type).c_str());
+ GELOGI("GatherV2Kernel does not support this Data type:%s", TypeUtils::DataTypeToSerialString(data_type).c_str());
return NOT_CHANGED;
}
return ret;
@@ -330,13 +330,13 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
auto axis_shape = tensor2->GetTensorDesc().GetShape();
// axis must be scalar
if (axis_shape.GetDimNum() != 0) {
- GELOGW("axis must be scalar but its shape is %zu.", axis_shape.GetDimNum());
+ GELOGW("axis must be scalar but its shape is %zu", axis_shape.GetDimNum());
return NOT_CHANGED;
}
auto axis_data_type = tensor2->GetTensorDesc().GetDataType();
bool is_valid_axis_data_type = axis_data_type == DT_INT32 || axis_data_type == DT_INT64;
if (!is_valid_axis_data_type) {
- GELOGW("axis datatype must be DT_INT32 or DT_INT64.");
+ GELOGW("axis datatype must be DT_INT32 or DT_INT64");
return NOT_CHANGED;
}
@@ -442,13 +442,13 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
auto ret_y = CalcStride(ystride_, y_shape);
ret = (ret_x == SUCCESS && ret_y == SUCCESS) ? SUCCESS : NOT_CHANGED;
if (ret != SUCCESS) {
- GELOGE(ret, "CalcStride Failed.");
+ GELOGE(ret, "CalcStride Failed");
return ret;
}
ret = Process(axis, x_data_type, tensor0, output_ptr);
if (ret != SUCCESS) {
- GELOGE(ret, "GenData failed, data_type: %s.", TypeUtils::DataTypeToSerialString(x_data_type).c_str());
+ GELOGE(ret, "GenData failed, data_type: %s", TypeUtils::DataTypeToSerialString(x_data_type).c_str());
return ret;
}
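The hunk above reports failures from `CalcStride` on the x and y shapes. Assuming it computes ordinary row-major strides (an assumption; the GE implementation is not shown in this diff), a sketch looks like this, with the bail-out mirroring the kernel's NOT_CHANGED path on an unknown dimension:

```cpp
#include <cstdint>
#include <vector>

// Illustrative helper, not GE's CalcStride: stride[i] is the number of elements to
// skip to advance one step along axis i in a row-major layout.
bool CalcRowMajorStride(std::vector<int64_t> &stride, const std::vector<int64_t> &dims) {
  stride.assign(dims.size(), 1);
  for (int64_t i = static_cast<int64_t>(dims.size()) - 2; i >= 0; --i) {
    if (dims[i + 1] < 0) {
      return false;  // unknown (dynamic) dim: give up, as the kernel does
    }
    stride[i] = stride[i + 1] * dims[i + 1];
  }
  return true;
}
```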
@@ -45,7 +45,7 @@ bool IsEllipsisMaskValid(const GeTensorDescPtr &input_desc, const uint32_t ellip
++ellipsis_num;
}
if (ellipsis_num > 1) {
- GELOGW("Only one non-zero bit is allowed in ellipsis_mask");
+ GELOGW("Only one non-zero bit is allowed in ellipsis_mask.");
return false;
}
}
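The rule enforced above can be restated as a small standalone check: ellipsis_mask may mark at most one axis, i.e. at most one set bit within the slice rank (illustrative helper, not the GE function):

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative only: count the mask bits that fall inside the tensor rank and
// reject more than one, as the hunk above does.
bool EllipsisMaskValid(uint32_t ellipsis_mask, size_t rank) {
  size_t ellipsis_num = 0;
  for (size_t i = 0; i < rank && i < 32; ++i) {  // mask is 32-bit, so cap the shift
    if (ellipsis_mask & (1u << i)) {
      ++ellipsis_num;
    }
    if (ellipsis_num > 1) {
      return false;  // more than one ellipsis axis is rejected
    }
  }
  return true;
}
```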
@@ -100,7 +100,7 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<g
std::vector<int64_t> output_dims;
std::vector<int64_t> stride_vec;
if (InitParamWithAttrs(input, input_dims, begin_vec, output_dims, stride_vec) != SUCCESS) {
- GELOGW("Init param with mask attrs failed.Ignore kernel");
+ GELOGW("Init param with mask attrs failed.Ignore kernel.");
return NOT_CHANGED;
}
@@ -114,7 +114,7 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<g
auto output_tensor_desc = attr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
- GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s", attr->GetName().c_str());
+ GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s.", attr->GetName().c_str());
return NOT_CHANGED;
}
auto ret = OpUtils::SetOutputSliceData(data, static_cast<int64_t>(data_size), data_type, input_dims, begin_vec,
@@ -138,7 +138,7 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<g
}
Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr) {
if (attr == nullptr) {
- GELOGE(PARAM_INVALID, "input opdescptr is nullptr");
+ GELOGE(PARAM_INVALID, "input opdescptr is nullptr.");
return PARAM_INVALID;
}
// Get all op attr value of strided_slice
@@ -159,7 +159,7 @@ Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr) {
}
Status StridedSliceKernel::CheckInputParam(const std::vector<ConstGeTensorPtr> &input) {
if (input.size() != kStridedSliceInputSize) {
- GELOGE(PARAM_INVALID, "The number of input for strided slice must be %zu", kStridedSliceInputSize);
+ GELOGE(PARAM_INVALID, "The number of input for strided slice must be %zu.", kStridedSliceInputSize);
return PARAM_INVALID;
}
@@ -178,7 +178,7 @@ Status StridedSliceKernel::CheckInputParam(const std::vector<ConstGeTensorPtr> &
auto stride_tensor_desc = begin_tensor->GetTensorDesc();
if (begin_tensor_desc.GetDataType() != end_tensor_desc.GetDataType() ||
end_tensor_desc.GetDataType() != stride_tensor_desc.GetDataType()) {
- GELOGW("Data type of StridedSlice OP(begin,end,strides) must be same");
+ GELOGW("Data type of StridedSlice OP(begin,end,strides) must be same.");
return PARAM_INVALID;
}
if (kIndexNumberType.find(begin_tensor_desc.GetDataType()) == kIndexNumberType.end()) {
@@ -190,7 +190,7 @@ Status StridedSliceKernel::CheckInputParam(const std::vector<ConstGeTensorPtr> &
auto x_data_type = weight0->GetTensorDesc().GetDataType();
auto x_data_size = GetSizeByDataType(x_data_type);
if (x_data_size < 0) {
- GELOGW("Data type of x input %s is not supported", TypeUtils::DataTypeToSerialString(x_data_type).c_str());
+ GELOGW("Data type of x input %s is not supported.", TypeUtils::DataTypeToSerialString(x_data_type).c_str());
return PARAM_INVALID;
}
size_t weight0_size = weight0->GetData().size() / x_data_size;
@@ -198,12 +198,12 @@ Status StridedSliceKernel::CheckInputParam(const std::vector<ConstGeTensorPtr> &
size_t end_data_size = end_tensor->GetData().size();
size_t stride_data_size = stride_tensor->GetData().size();
if ((weight0_size == 0) || (begin_data_size == 0) || (end_data_size == 0) || (stride_data_size == 0)) {
- GELOGW("Data size of inputs is 0");
+ GELOGW("Data size of inputs is 0.");
return PARAM_INVALID;
}
// check dim size
if (!((begin_data_size == end_data_size) && (end_data_size == stride_data_size))) {
- GELOGW("The sizes of begin, end and stride is not supported");
+ GELOGW("The sizes of begin, end and stride is not supported.");
return PARAM_INVALID;
}
return SUCCESS;
@@ -254,7 +254,7 @@ Status StridedSliceKernel::InitParamWithAttrs(const std::vector<ConstGeTensorPtr
begin_i, end_i, stride_i, x_dims.at(i));
auto ret = MaskCal(i, begin_i, end_i, x_dims.at(i));
if (ret != SUCCESS) {
- GELOGW("MaskCal failed, because of data overflow");
+ GELOGW("MaskCal failed, because of data overflow.");
return NOT_CHANGED;
}
int64_t dim_final;
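Downstream of `MaskCal`, `dim_final` is the extent of the sliced axis. A hedged sketch of how that extent follows from begin/end/stride for a positive stride (illustrative only; GE's handling of negative strides and the overflow guard flagged above is more involved):

```cpp
#include <algorithm>
#include <cstdint>

// Illustrative only: resolve negative indices, clamp into [0, dim], then take
// ceil((end - begin) / stride). Returns false for a non-positive stride.
bool SlicedDimSize(int64_t begin, int64_t end, int64_t stride, int64_t dim, int64_t &out) {
  if (stride <= 0) {
    return false;
  }
  if (begin < 0) begin += dim;  // resolve negative indices relative to the axis length
  if (end < 0) end += dim;
  begin = std::min(std::max<int64_t>(begin, 0), dim);
  end = std::min(std::max<int64_t>(end, 0), dim);
  out = (end > begin) ? (end - begin + stride - 1) / stride : 0;
  return true;
}
```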
@@ -273,7 +273,7 @@ void StridedSliceKernel::ExpandDimsWithNewAxis(const ConstGeTensorPtr &begin_ten
vector<int64_t> &x_dims) {
auto begin_data_type_size = GetSizeByDataType(begin_tensor->GetTensorDesc().GetDataType());
if (begin_data_type_size == 0) {
- GELOGW("Param begin_data_type_size should not be zero");
+ GELOGW("Param begin_data_type_size should not be zero.");
return;
}
size_t begin_vec_size = begin_tensor->GetData().size() / begin_data_type_size;