|
|
@@ -104,16 +104,6 @@ Status DataOpParser::Init5DInputTensor(const std::vector<int64_t> &shape, ge::Ge
  ge::TensorUtils::SetReuseInput(tensor_desc, false);
  ge::TensorUtils::SetRealDimCnt(tensor_desc, shape.size());
  tensor_desc.SetShape(ge::GeShape(shape));

  int64_t tensor_size = 0;
  ge::graphStatus graph_status = ge::TensorUtils::GetTensorSizeInBytes(tensor_desc, tensor_size);
  if (graph_status != ge::GRAPH_SUCCESS) {
    REPORT_CALL_ERROR("E19999", "GetTensorSizeInBytes failed");
    GELOGE(FAILED, "[Invoke][GetTensorSizeInBytes] failed!");
    return domi::FAILED;
  }
  // Set the actual occupied space size
  ge::TensorUtils::SetSize(tensor_desc, tensor_size);
  return SUCCESS;
}

@@ -127,20 +117,6 @@ Status DataOpParser::InitNDTensor(const std::vector<int64_t> &shape, ge::DataTyp
  ge::TensorUtils::SetRealDimCnt(tensor_desc, shape.size());
  tensor_desc.SetShape(ge::GeShape(shape));
  tensor_desc.SetOriginShape(ge::GeShape(shape));

  int64_t size = kScalarLength;
  if (!tensor_desc.GetShape().GetDims().empty()) {
    size = tensor_desc.GetShape().GetShapeSize();
  }
  uint32_t type_size = 0;
  if (ge::TypeUtils::GetDataTypeLength(data_type, type_size)) {
    PARSER_INT64_UINT32_MULCHECK(size, type_size);
    size *= type_size;
  } else {
    PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
    size *= sizeof(float);
  }
  ge::TensorUtils::SetSize(tensor_desc, size);
  return SUCCESS;
}
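
For context (not part of the diff): a minimal standalone sketch of the byte-size arithmetic InitNDTensor performs, assuming kScalarLength is 1. NdTensorSizeBytes is a hypothetical helper used only for illustration; the overflow guard (PARSER_INT64_UINT32_MULCHECK) is omitted.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in that mirrors InitNDTensor's size computation.
int64_t NdTensorSizeBytes(const std::vector<int64_t> &shape, uint32_t type_size) {
  int64_t elems = 1;  // an empty shape (a scalar) still holds one element
  for (int64_t dim : shape) {
    elems *= dim;  // the real code uses GeShape::GetShapeSize() plus an overflow check
  }
  // In the parser, type_size falls back to sizeof(float) when GetDataTypeLength fails.
  return elems * static_cast<int64_t>(type_size);
}

int main() {
  // shape {2, 3, 4} with a 2-byte element type (e.g. float16): 24 elements * 2 = 48 bytes
  std::cout << NdTensorSizeBytes({2, 3, 4}, 2) << std::endl;  // prints 48
}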
|
|
|
|
|
|
@@ -150,16 +126,6 @@ Status DataOpParser::Init5DOutputTensor(const std::vector<int64_t> &shape, ge::G
  ge::TensorUtils::SetReuseInput(output, false);
  ge::TensorUtils::SetRealDimCnt(output, shape.size());
  output.SetShape(ge::GeShape(shape));

  int64_t output_size = 0;
  ge::graphStatus graph_status = ge::TensorUtils::GetTensorMemorySizeInBytes(output, output_size);
  if (graph_status != ge::GRAPH_SUCCESS) {
    REPORT_CALL_ERROR("E19999", "GetTensorMemorySizeInBytes failed!");
    GELOGE(FAILED, "[Invoke][GetTensorMemorySizeInBytes] failed!");
    return domi::FAILED;
  }
  // Set the actual occupied space size
  ge::TensorUtils::SetSize(output, output_size);
  return SUCCESS;
}

@@ -171,19 +137,10 @@ Status DataOpParser::InitInputTensor(const std::vector<int64_t> &shape, ge::GeTe

  input.SetShape(ge::GeShape(shape));
  input.SetOriginShape(ge::GeShape(shape));
  int64_t size = 0;
  // No need to check dynamic_batch_size since its first dim is -1.
  if (input.GetShape().GetDim(0) != -1) {
    size = input.GetShape().GetShapeSize();
  }
  PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
  ge::TensorUtils::SetSize(input, size * sizeof(float));

  return SUCCESS;
}

Status DataOpParser::InitOutputTensor(const std::vector<int64_t> &shape, ge::GeTensorDesc &output) {
  int64_t output_size = 0;
  ge::GeShape output_shape = ge::GeShape(shape);
  ge::Format format = ge::FORMAT_ND;
  ge::DataType data_type = ge::DT_FLOAT;

@@ -193,41 +150,6 @@ Status DataOpParser::InitOutputTensor(const std::vector<int64_t> &shape, ge::GeT
  ge::TensorUtils::SetRealDimCnt(output, shape.size());
  output.SetShape(output_shape);

  ge::graphStatus graph_status = ge::TensorUtils::CalcTensorMemSize(output_shape, format, data_type, output_size);
  if (graph_status != ge::GRAPH_SUCCESS) {
    REPORT_CALL_ERROR("E19999", "CalcTensorMemSize failed, shape:%s, format:%s, datatype:%s",
                      output_shape.ToString().c_str(),
                      ge::TypeUtils::FormatToSerialString(format).c_str(),
                      ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
    GELOGE(FAILED, "[Invoke][CalcTensorMemSize] failed, shape:%s, format:%s, datatype:%s",
           output_shape.ToString().c_str(),
           ge::TypeUtils::FormatToSerialString(format).c_str(),
           ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
    return FAILED;
  }

  if (output_size == kDynamicBatchInputSize) {
    GELOGI("After calc tensor memory size, output_mem_size = %ld", output_size);
    return SUCCESS;
  }

  int64_t size = output_size;
  auto valid_max_size = INT64_MAX - kTwoTimesAlign * kDataMemAlignSize;
  if (size > valid_max_size || size < 0) {
    REPORT_INNER_ERROR("E19999", "updated mem size is out of data range [0, %ld], shape:%s, format:%s, datatype:%s",
                       valid_max_size, output_shape.ToString().c_str(),
                       ge::TypeUtils::FormatToSerialString(format).c_str(),
                       ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
    GELOGE(FAILED, "[Check][Size] updated mem size is out of data range [0, %ld], shape:%s, format:%s, datatype:%s",
           valid_max_size, output_shape.ToString().c_str(),
           ge::TypeUtils::FormatToSerialString(format).c_str(),
           ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
    return FAILED;
  } else {
    size = ((size + kTwoTimesAlign * kDataMemAlignSize - 1) / kDataMemAlignSize) * kDataMemAlignSize;
  }
  // Set the actual occupied space size
  ge::TensorUtils::SetSize(output, size);
  return SUCCESS;
}
} // namespace ge
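
For context (not part of the diff): the final branch of InitOutputTensor rounds the computed size up to the data-memory alignment and adds one extra alignment block of padding. Below is a minimal standalone sketch of that formula; the constant values are assumptions for illustration only and are not taken from this change.

#include <cstdint>
#include <iostream>

// Assumed values for illustration; the parser defines its own kDataMemAlignSize and kTwoTimesAlign.
constexpr int64_t kDataMemAlignSize = 32;
constexpr int64_t kTwoTimesAlign = 2;

// Same expression as in the diff: equivalent to rounding size up to the next
// multiple of kDataMemAlignSize and then adding one more alignment block.
int64_t AlignOutputSize(int64_t size) {
  return ((size + kTwoTimesAlign * kDataMemAlignSize - 1) / kDataMemAlignSize) * kDataMemAlignSize;
}

int main() {
  std::cout << AlignOutputSize(100) << std::endl;  // (100 + 63) / 32 = 5, 5 * 32 = 160
}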