@@ -0,0 +1,5 @@
+[graphengine]
+ge
+inc
+metadef
+parser

@@ -30,6 +30,7 @@ enum DataType
 DT_RESOURCE = 23; // resource type
 DT_STRING_REF = 24; // string_ref type
 DT_DUAL = 25; /**< dual output type */
+DT_VARIANT = 26; // variant type
 }
 message AttrDef

@@ -62,6 +62,7 @@ std::map<ge::DataType, ge::proto::DataType> g_dump_data_type_map = {
 {ge::DT_RESOURCE, ge::proto::DT_RESOURCE},
 {ge::DT_STRING_REF, ge::proto::DT_STRING_REF},
 {ge::DT_STRING, ge::proto::DT_STRING},
+{ge::DT_VARIANT, ge::proto::DT_VARIANT},
 };
 } // namespace

@@ -122,7 +122,7 @@ Status ModelHelper::SaveModelTbeKernel(std::shared_ptr<OmFileSaveHelper> &om_fil
 if (tbe_kernel_store.DataSize() > 0) {
 GE_CHK_STATUS_RET(
 SaveModelPartition(om_file_save_helper, ModelPartitionType::TBE_KERNELS,
-ge_model->GetTBEKernelStore().Data(), ge_model->GetTBEKernelStore().DataSize(),
+ge_model->GetTBEKernelStore().Data(), ge_model->GetTBEKernelStore().DataSize(),
 model_index), "Add tbe kernel partition failed");
 }
 // no need to check value, DATA->NetOutput

@@ -306,7 +306,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRoo
 model_names.emplace_back(item.first);
 }
 }
 vector<ge::Buffer> model_buffers(model_names.size());
 vector<ge::Buffer> task_buffers(model_names.size());

@@ -611,17 +611,16 @@ Status ModelHelper::LoadModelData(OmFileLoadHelper &om_load_helper) {
 return INTERNAL_ERROR;
 }
-SetModelToGeModel(model);
+SetModelToGeModel(model_, model);
 return SUCCESS;
 }
-void ModelHelper::SetModelToGeModel(ge::Model &model) {
-model_->SetGraph(model.GetGraph());
-model_->SetName(model.GetName());
-model_->SetVersion(model.GetVersion());
-model_->SetPlatformVersion(model.GetPlatformVersion());
-model_->SetAttr(model.MutableAttrMap());
+void ModelHelper::SetModelToGeModel(GeModelPtr &ge_model, Model &model) {
+ge_model->SetGraph(model.GetGraph());
+ge_model->SetName(model.GetName());
+ge_model->SetVersion(model.GetVersion());
+ge_model->SetPlatformVersion(model.GetPlatformVersion());
+ge_model->SetAttr(model.MutableAttrMap());
 }
 Status ModelHelper::LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index) {

@@ -636,12 +635,7 @@ Status ModelHelper::LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &
 return INTERNAL_ERROR;
 }
-cur_model->SetGraph(model.GetGraph());
-cur_model->SetName(model.GetName());
-cur_model->SetVersion(model.GetVersion());
-cur_model->SetPlatformVersion(model.GetPlatformVersion());
-cur_model->SetAttr(model.MutableAttrMap());
+SetModelToGeModel(cur_model, model);
 return SUCCESS;
 }

@@ -30,6 +30,7 @@ enum DataType
 DT_RESOURCE = 23; // resource type
 DT_STRING_REF = 24; // string_ref type
 DT_DUAL = 25; /**< dual output type */
+DT_VARIANT = 26; // variant type
 }
 message AttrDef

@@ -569,7 +569,7 @@ Status GeExecutor::UnloadModel(uint32_t model_id) {
 Status ret = GraphLoader::DestroyAicpuSessionForInfer(model_id);
 if (ret != SUCCESS) {
 GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id);
-return ACL_ERROR_GE_INTERNAL_ERROR;
+return ret;
 }
 std::shared_ptr<hybrid::HybridDavinciModel> hybrid_davinci_model =

@@ -587,7 +587,7 @@ Status GeExecutor::UnloadModel(uint32_t model_id) {
 ret = GraphLoader::UnloadModel(model_id);
 if (ret != SUCCESS) {
 GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id);
-return ACL_ERROR_GE_UNLOAD_MODEL;
+return ret;
 }
 return SUCCESS;
 }

@@ -28,6 +28,7 @@ enum OutputDataType {
 DT_RESOURCE = 23;
 DT_STRING_REF = 24;
 DT_DUAL = 25;
+DT_VARIANT = 26;
 }
 enum OutputFormat {

@@ -30,6 +30,7 @@ enum DataType
 DT_RESOURCE = 23; // resource type
 DT_STRING_REF = 24; // string_ref type
 DT_DUAL = 25; /**< dual output type */
+DT_VARIANT = 26; // variant type
 }
 message AttrDef

@@ -265,7 +265,7 @@ static Status CheckShapeReset(const OpDescPtr &op_desc, bool &change_shape_flag)
 return SUCCESS;
 }
-static void ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTensor> &inputs_dynamic) {
+static Status ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTensor> &inputs_dynamic) {
 for (auto input : inputs) {
 auto input_desc = input.GetTensorDesc();
 GeShape shape_ori = input_desc.GetShape();

@@ -280,6 +280,12 @@ static void ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTensor>
 bool is_const = false;
 (void)AttrUtils::GetBool(input_desc, CONST_ATTR_NAME_INPUT, is_const);
 if (!is_const && shape_ori.GetDims().size() > 0) {
+int64_t storage_format = FORMAT_NCHW;
+if (ge::AttrUtils::GetInt(desc, ge::ATTR_NAME_STORAGE_FORMAT, storage_format) &&
+!ge::AttrUtils::SetListInt(desc, ge::ATTR_NAME_STORAGE_SHAPE, dynamic_shape_dims)) {
+GELOGE(FAILED, "Set attr ATTR_NAME_STORAGE_SHAPE fail.");
+return FAILED;
+}
 desc.SetShape(dynamic_shape);
 desc.SetShapeRange(dynamic_shape_range);
 }

@@ -287,6 +293,7 @@ static void ResetTensorVecShape(const vector<GeTensor> &inputs, vector<GeTensor>
 inputTensor.SetTensorDesc(desc);
 inputs_dynamic.push_back(inputTensor);
 }
+return SUCCESS;
 }
 class GeGenerator::Impl {

@@ -684,8 +691,8 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
 if (CheckShapeReset(op_desc, dynamic_flag) == SUCCESS && dynamic_flag) {
 vector<GeTensor> inputs_dynamic;
 vector<GeTensor> outputs_dynamic;
-ResetTensorVecShape(inputs, inputs_dynamic);
-ResetTensorVecShape(outputs, outputs_dynamic);
+GE_CHK_STATUS_RET_NOLOG(ResetTensorVecShape(inputs, inputs_dynamic));
+GE_CHK_STATUS_RET_NOLOG(ResetTensorVecShape(outputs, outputs_dynamic));
 GE_CHK_STATUS_RET_NOLOG(
 impl_->SaveParams(ge_model, op_desc_tmp->GetType(), op_attrs, inputs_dynamic, outputs_dynamic));
 } else {

@@ -582,9 +582,13 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
 // Add TBE Kernels and custom aicpu op bin
 std::set<std::string> tbe_name_set;
 std::set<std::string> aicpu_name_set;
+std::set<std::string> aicpu_op_types;
+std::set<std::string> aicpu_tf_op_types;
 for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
 auto node_op_desc = n->GetOpDesc();
 GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
+// check aicpu op type
+CollectCheckAicpuAttr(node_op_desc, aicpu_op_types, aicpu_tf_op_types);
 TBEKernelPtr tbe_kernel = node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
 if (tbe_kernel == nullptr) {
 std::string kernel_name;

@@ -606,6 +610,8 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
 tbe_kernel_store_.AddTBEKernel(tbe_kernel);
 }
+SetModelCheckAicpuAttr(model, aicpu_op_types, aicpu_tf_op_types);
 for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
 auto node_op_desc = n->GetOpDesc();
 GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);

@@ -797,4 +803,51 @@ Status ModelBuilder::CompileSingleOp() {
 GE_TIMESTAMP_CALLNUM_END(BatchCompileOp, "GraphBuild::CompileOp");
 return ge::SUCCESS;
 }
+void ModelBuilder::CollectCheckAicpuAttr(const OpDescPtr &op_desc, std::set<std::string> &aicpu_op_types,
+std::set<std::string> &aicpu_tf_op_types) {
+std::string aicpu_optype;
+bool has_attr_check_cpu = ge::AttrUtils::GetStr(op_desc, "needCheckCpu", aicpu_optype);
+std::vector<std::string> tf_optypes;
+bool has_attr_check_tf = ge::AttrUtils::GetListStr(op_desc, "needCheckTf", tf_optypes);
+if (has_attr_check_cpu && !aicpu_optype.empty()) {
+aicpu_op_types.insert(aicpu_optype);
+}
+if (has_attr_check_tf && !tf_optypes.empty()) {
+aicpu_tf_op_types.insert(tf_optypes.begin(), tf_optypes.end());
+}
+return;
+}
+void ModelBuilder::SetModelCheckAicpuAttr(ge::Model &model, std::set<std::string> &aicpu_op_types,
+std::set<std::string> &aicpu_tf_op_types) {
+std::vector<std::string> aicpu_optype_list;
+std::vector<std::string> aicpu_tf_optype_list;
+if (ge::AttrUtils::GetListStr(&model, "needCheckCpu", aicpu_optype_list)) {
+GELOGI("Already have aicpu optype size: %zu", aicpu_optype_list.size());
+aicpu_op_types.insert(aicpu_optype_list.begin(), aicpu_optype_list.end());
+}
+if (ge::AttrUtils::GetListStr(&model, "needCheckTf", aicpu_tf_optype_list)) {
+GELOGI("Already have aicpu tf optype size: %zu", aicpu_tf_optype_list.size());
+aicpu_tf_op_types.insert(aicpu_tf_optype_list.begin(), aicpu_tf_optype_list.end());
+}
+// reset list with set
+aicpu_optype_list.assign(aicpu_op_types.begin(), aicpu_op_types.end());
+aicpu_tf_optype_list.assign(aicpu_tf_op_types.begin(), aicpu_tf_op_types.end());
+GELOGI(
+"Check Aicpu op types ComputeGraph: %s aicpu_op_types: %zu, aicpu_optype_list: %zu, aicpu_tf_op_types: %zu, "
+"aicpu_tf_optype_list:%zu.",
+compute_graph_->GetName().c_str(), aicpu_op_types.size(), aicpu_optype_list.size(), aicpu_tf_op_types.size(),
+aicpu_tf_optype_list.size());
+GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckCpu", aicpu_optype_list), return,
+"Set attr needCheckCpu fail.");
+GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckTf", aicpu_tf_optype_list), return,
+"Set attr needCheckTf fail.");
+return;
+}
 } // namespace ge

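Note on the two helpers above: CollectCheckAicpuAttr() and SetModelCheckAicpuAttr() only aggregate attributes that earlier stages have already attached to individual ops under the keys "needCheckCpu" and "needCheckTf". A rough, hypothetical illustration of how a node could be tagged upstream so this collection picks it up (the op names below are invented; the attribute keys and AttrUtils calls are the ones used in this diff):

    // Hypothetical tagging step, not part of this change: once an op carries
    // these attributes, CollectCheckAicpuAttr() folds its type(s) into the
    // model-level "needCheckCpu"/"needCheckTf" lists that CheckAicpuOpList()
    // later validates at load time.
    (void)ge::AttrUtils::SetStr(node_op_desc, "needCheckCpu", "UniqueV2");
    (void)ge::AttrUtils::SetListStr(node_op_desc, "needCheckTf",
                                    std::vector<std::string>({"Where", "NonMaxSuppressionV3"}));
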
@@ -83,6 +83,12 @@ class ModelBuilder {
 Status CompileSingleOp();
+void CollectCheckAicpuAttr(const OpDescPtr &op_desc, std::set<std::string> &aicpu_op_types,
+std::set<std::string> &aicpu_tf_op_types);
+void SetModelCheckAicpuAttr(ge::Model &model, std::set<std::string> &aicpu_op_types,
+std::set<std::string> &aicpu_tf_op_types);
 uint64_t session_id_;
 map<int64_t, size_t> mem_type_to_mem_offset_;

@@ -176,8 +176,8 @@ Status GraphLoader::LoadModelFromData(uint32_t &model_id, const ModelData &model
 Status ret = model_manager->LoadModelOffline(
 model_id, model_data, nullptr, dev_ptr, mem_size, weight_ptr, weight_size);
 if (ret != SUCCESS) {
-GELOGE(ACL_ERROR_GE_LOAD_MODEL, "Load model failed, model_id:%u.", model_id);
-return ACL_ERROR_GE_LOAD_MODEL;
+GELOGE(ret, "Load model failed, model_id:%u.", model_id);
+return ret;
 }
 GELOGI("Load model success, model_id:%u.", model_id);
 return SUCCESS;

@@ -202,8 +202,8 @@ Status GraphLoader::LoadModelWithQ(uint32_t &model_id, const ModelData &model_da
 GE_CHECK_NOTNULL(model_manager);
 Status ret = model_manager->LoadModelWithQ(model_id, model_data, input_queue_ids, output_queue_ids);
 if (ret != SUCCESS) {
-GELOGE(ACL_ERROR_GE_LOAD_MODEL, "Load model with queue failed, model_id:%u.", model_id);
-return ACL_ERROR_GE_LOAD_MODEL;
+GELOGE(ret, "Load model with queue failed, model_id:%u.", model_id);
+return ret;
 }
 GELOGI("Load model with queue success, model_id:%u.", model_id);

@@ -120,6 +120,7 @@ static int32_t GetIrDataType(ge::DataType data_type) {
 {ge::DT_RESOURCE, ge::proto::DT_RESOURCE},
 {ge::DT_STRING_REF, ge::proto::DT_STRING_REF},
 {ge::DT_STRING, ge::proto::DT_STRING},
+{ge::DT_VARIANT, ge::proto::DT_VARIANT},
 };
 auto iter = data_type_map.find(data_type);

@@ -485,6 +485,8 @@ Status DavinciModel::DoTaskSink() {
 GE_CHK_STATUS_RET(ModelManager::GetInstance()->LaunchCustAicpuSo(), "Launch cust aicpu so failed.");
+GE_CHK_STATUS_RET(ModelManager::GetInstance()->CheckAicpuOpList(ge_model_), "Check aicpu op type failed.");
 GE_CHK_STATUS_RET(InitEntryTask(), "InitEntryTask failed.");
 GE_CHK_STATUS_RET(DistributeTask(), "Distribute failed.");

@@ -680,7 +682,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size
 SetDataDumperArgs(compute_graph);
 GE_TIMESTAMP_START(DoTaskSink);
-auto ret = DoTaskSink();
+GE_CHK_STATUS_RET(DoTaskSink(), "Task sink failed");
 GE_TIMESTAMP_END(DoTaskSink, "GraphLoader::DoTaskSink");
 auto all_dump_model = GetDumpProperties().GetAllDumpModel();

@@ -721,7 +723,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size
 }
 Shrink();
-return ret;
+return SUCCESS;
 }
 Status DavinciModel::ReportProfilingData() {

@@ -18,6 +18,7 @@
 #include <string>
+#include "aicpu/aicpu_schedule/aicpu_op_type_list.h"
 #include "common/dump/dump_manager.h"
 #include "common/l2_cache_optimize.h"
 #include "common/profiling/profiling_manager.h"

@@ -30,6 +31,7 @@
 #include "graph/load/new_model_manager/davinci_model_parser.h"
 #include "model/ge_root_model.h"
 #include "graph/common/local_context.h"
+#include "graph/utils/attr_utils.h"
 #include "common/formats/utils/formats_trans_utils.h"
 #include "hybrid/hybrid_davinci_model.h"

@@ -52,6 +54,7 @@ const char *const kDeleteCustOp = "deleteCustOp";
 const int kTimeSpecNano = 1000000000;
 const int kTimeSpecMiro = 1000000;
 const int kSessionMaxBias = 100;
+const int kOpNameMaxSize = 100;
 struct CustAicpuSoBuf {
 uint64_t kernelSoBuf;
 uint32_t kernelSoBufLen;

@@ -1534,4 +1537,200 @@ Status ModelManager::EnableExceptionDump(const std::map<string, string> &options
 return SUCCESS;
 }
+Status ModelManager::LaunchKernelCheckAicpuOp(std::vector<std::string> &aicpu_optype_list,
+std::vector<std::string> &aicpu_tf_optype_list) {
+std::string kernel_name = "checkOpType";
+GELOGI("LaunchKernelCheckAicpuOpType in, kernel name %s", kernel_name.c_str());
+std::lock_guard<std::mutex> lock(cust_aicpu_mutex_);
+std::vector<SysOpInfo> req_aicpu_op_info_list;
+std::vector<SysOpInfo> res_aicpu_op_info_list;
+std::vector<ReturnCode> res_ret_code_list;
+if (aicpu_optype_list.empty() && aicpu_tf_optype_list.empty()) {
+GELOGI("No need to check aicpu op type.");
+return SUCCESS;
+}
+vector<void *> allocated_mem;
+rtError_t status;
+rtStream_t stream = nullptr;
+void *args = nullptr;
+void *d_req_op_list = nullptr;
+void *d_res_op_list = nullptr;
+void *d_ret_code_list = nullptr;
+size_t aicpu_op_nums = aicpu_optype_list.size();
+size_t tf_op_nums = aicpu_tf_optype_list.size();
+size_t op_nums = aicpu_op_nums + tf_op_nums;
+// malloc sysOpInfoList in SysOpCheckInfo
+status = rtMalloc(&d_req_op_list, op_nums * sizeof(SysOpInfo), RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(d_req_op_list);
+// malloc sysOpInfoList in SysOpCheckResp
+status = rtMalloc(&d_res_op_list, op_nums * sizeof(SysOpInfo), RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(d_res_op_list);
+// malloc returnCodeList in SysOpCheckResp
+status = rtMalloc(&d_ret_code_list, op_nums * sizeof(ReturnCode), RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(d_ret_code_list);
+for (const auto &op_type : aicpu_optype_list) {
+SysOpInfo op_info;
+// malloc op_type name in SysOpInfo
+void *d_op_type_name = nullptr;
+status = rtMalloc(&d_op_type_name, op_type.length(), RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(d_op_type_name);
+GE_CHK_RT(rtMemcpy(d_op_type_name, op_type.length(), op_type.c_str(), op_type.length(), RT_MEMCPY_HOST_TO_DEVICE));
+op_info.opType = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(d_op_type_name));
+op_info.opLen = op_type.length();
+op_info.kernelsType = CPU_KERNEL;
+req_aicpu_op_info_list.emplace_back(op_info);
+}
+for (const auto &op_type : aicpu_tf_optype_list) {
+SysOpInfo op_info;
+// malloc op_type name in SysOpInfo
+void *d_op_type_name = nullptr;
+status = rtMalloc(&d_op_type_name, op_type.size(), RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(d_op_type_name);
+GE_CHK_RT(rtMemcpy(d_op_type_name, op_type.size(), op_type.c_str(), op_type.size(), RT_MEMCPY_HOST_TO_DEVICE));
+op_info.opType = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(d_op_type_name));
+op_info.opLen = op_type.size();
+op_info.kernelsType = TF_KERNEL;
+req_aicpu_op_info_list.emplace_back(op_info);
+}
+GELOGI("Check aicpu op all attr size: %zu, real attr size: %zu.", op_nums, req_aicpu_op_info_list.size());
+GE_CHK_RT(rtMemcpy(d_req_op_list, sizeof(SysOpInfo) * req_aicpu_op_info_list.size(), req_aicpu_op_info_list.data(),
+sizeof(SysOpInfo) * req_aicpu_op_info_list.size(), RT_MEMCPY_HOST_TO_DEVICE));
+SysOpCheckInfo op_check_info_req = { 0 };
+SysOpCheckResp op_check_info_res = { 0 };
+op_check_info_req.opListNum = op_nums;
+op_check_info_req.offSetLen = sizeof(SysOpCheckInfo);
+op_check_info_req.sysOpInfoList = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(d_req_op_list));
+op_check_info_res.opListNum = 0;
+op_check_info_res.isWithoutJson = 0;
+op_check_info_res.returnCodeList = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(d_ret_code_list));
+op_check_info_res.sysOpInfoList = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(d_res_op_list));
+uint32_t args_size = sizeof(SysOpCheckInfo) + sizeof(SysOpCheckResp);
+status = rtMalloc(&args, args_size, RT_MEMORY_HBM);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+allocated_mem.push_back(args);
+GE_CHK_RT(
+rtMemcpy(args, sizeof(SysOpCheckInfo), reinterpret_cast<void *>(&op_check_info_req), sizeof(SysOpCheckInfo), RT_MEMCPY_HOST_TO_DEVICE));
+GE_CHK_RT(rtMemcpy(reinterpret_cast<void *>(static_cast<uintptr_t>(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(args)) + op_check_info_req.offSetLen)),
+sizeof(SysOpCheckResp), reinterpret_cast<void *>(&op_check_info_res), sizeof(SysOpCheckResp), RT_MEMCPY_HOST_TO_DEVICE));
+GE_CHK_RT(rtStreamCreate(&stream, 0));
+GE_CHK_RT(rtCpuKernelLaunch(nullptr, kernel_name.c_str(), 1, args, args_size, nullptr, stream));
+status = rtStreamSynchronize(stream);
+if (status != RT_ERROR_NONE) {
+GELOGE(RT_FAILED, "Call rt stream sync failed, status: 0x%x", status);
+return RT_ERROR_TO_GE_STATUS(status);
+}
+// Check the response
+SysOpCheckResp *d_op_check_info_res = reinterpret_cast<SysOpCheckResp *>(reinterpret_cast<void *>(static_cast<uintptr_t>(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(args)) + op_check_info_req.offSetLen)));
+(void)memset_s(&op_check_info_res, sizeof(SysOpCheckResp), 0, sizeof(SysOpCheckResp));
+GE_CHK_RT(rtMemcpy(&op_check_info_res, sizeof(SysOpCheckResp), d_op_check_info_res, sizeof(SysOpCheckResp),
+RT_MEMCPY_DEVICE_TO_HOST));
+std::function<void()> callback = [&]() {
+for (auto mem : allocated_mem) {
+GE_CHK_RT(rtFree(mem));
+}
+GE_CHK_RT(rtStreamDestroy(stream));
+};
+if (op_check_info_res.isWithoutJson) {
+GELOGI("No need to check aicpu in this scenoria.");
+GE_MAKE_GUARD(release, callback);
+return SUCCESS;
+}
+uint64_t res_op_nums = op_check_info_res.opListNum;
+GELOGI("Check aicpu type, is without json: %d, res op num: %lu.", op_check_info_res.isWithoutJson, res_op_nums);
+if (res_op_nums != 0) {
+res_ret_code_list.clear();
+res_ret_code_list.resize(res_op_nums);
+res_aicpu_op_info_list.clear();
+res_aicpu_op_info_list.resize(res_op_nums);
+GE_CHK_RT(rtMemcpy(res_ret_code_list.data(), sizeof(ReturnCode) * res_op_nums,
+reinterpret_cast<void *>(static_cast<uintptr_t>(op_check_info_res.returnCodeList)),
+sizeof(ReturnCode) * res_op_nums, RT_MEMCPY_DEVICE_TO_HOST));
+GE_CHK_RT(rtMemcpy(res_aicpu_op_info_list.data(), sizeof(SysOpInfo) * res_op_nums,
+reinterpret_cast<void *>(static_cast<uintptr_t>(op_check_info_res.sysOpInfoList)),
+sizeof(SysOpInfo) * res_op_nums, RT_MEMCPY_DEVICE_TO_HOST));
+if (res_ret_code_list.size() != res_aicpu_op_info_list.size() || res_ret_code_list.size() != res_op_nums) {
+GELOGE(FAILED, "Number of retcode is not equal to number of op type.");
+GE_MAKE_GUARD(release, callback);
+return FAILED;
+}
+std::string fail_reason;
+for (uint32_t i = 0; i < res_op_nums; i++) {
+ReturnCode ret_code = res_ret_code_list.at(i);
+SysOpInfo aicpu_info = res_aicpu_op_info_list.at(i);
+GELOGI("Not support aicpu op type: %lu, kernel_type:%d, opLen:%d, ret_code:%d", aicpu_info.opType,
+aicpu_info.kernelsType, aicpu_info.opLen, ret_code);
+std::vector<char> op_name;
+op_name.clear();
+op_name.resize(kOpNameMaxSize);
+GE_CHK_RT(rtMemcpy(op_name.data(), aicpu_info.opLen, reinterpret_cast<void *>(aicpu_info.opType),
+aicpu_info.opLen, RT_MEMCPY_DEVICE_TO_HOST));
+std::string kernel_type =
+(static_cast<OpKernelType>(aicpu_info.kernelsType) == TF_KERNEL) ? "TF_KERNEL" : "CPU_KERNEL";
+string op_name_str(op_name.data());
+fail_reason += "op_type: " + op_name_str + " kernel_type: " + kernel_type +
+" ret code:" + std::to_string(static_cast<int>(ret_code)) +
+"<0: op_type, 1: format, 2: datatype> \n";
+}
+fail_reason += "not support.";
+GELOGE(FAILED, "Check aicpu op_type failed. details: %s", fail_reason.c_str());
+GE_MAKE_GUARD(release, callback);
+return FAILED;
+}
+GE_MAKE_GUARD(release, callback);
+GELOGI("Cpu kernel launch check optype task success.");
+return SUCCESS;
+}
+Status ModelManager::CheckAicpuOpList(GeModelPtr ge_model) {
+std::vector<std::string> aicpu_optype_list;
+std::vector<std::string> aicpu_tf_optype_list;
+bool aicpu_need_check = ge::AttrUtils::GetListStr(ge_model, "needCheckCpu", aicpu_optype_list);
+bool tf_need_check = ge::AttrUtils::GetListStr(ge_model, "needCheckTf", aicpu_tf_optype_list);
+if (!aicpu_need_check && !tf_need_check) {
+GELOGI("Graph:%s No need to check aicpu optype.", ge_model->GetGraph().GetName().c_str());
+return SUCCESS;
+}
+GE_CHK_STATUS_RET(LaunchKernelCheckAicpuOp(aicpu_optype_list, aicpu_tf_optype_list),
+"Launch check aicpu op type failed.");
+return SUCCESS;
+}
 } // namespace ge

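The launch path above only touches a handful of fields on the types declared in aicpu/aicpu_schedule/aicpu_op_type_list.h (SysOpInfo, SysOpCheckInfo, SysOpCheckResp, ReturnCode, OpKernelType). That header is not part of this diff, so the sketch below is merely the layout implied by how those fields are read and written here; exact types, widths and enum values are assumptions, not the real definitions.

    // Implied layout only -- the real definitions live in aicpu_op_type_list.h
    // and may differ. The "list" fields carry device addresses packed into
    // uint64_t, matching the cast chains in LaunchKernelCheckAicpuOp above.
    enum OpKernelType { TF_KERNEL = 1, CPU_KERNEL = 2 };  // values assumed
    typedef int32_t ReturnCode;                           // width assumed
    struct SysOpInfo {
      uint64_t opType;      // device address of the op type string
      uint64_t opLen;       // length of that string
      int32_t kernelsType;  // CPU_KERNEL or TF_KERNEL
    };
    struct SysOpCheckInfo {
      uint64_t opListNum;
      uint64_t offSetLen;      // offset from the request block to the response block
      uint64_t sysOpInfoList;  // device address of SysOpInfo[opListNum]
    };
    struct SysOpCheckResp {
      uint64_t opListNum;
      uint64_t isWithoutJson;
      uint64_t returnCodeList;  // device address of ReturnCode[opListNum]
      uint64_t sysOpInfoList;   // device address of SysOpInfo[opListNum]
    };
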
@@ -295,6 +295,11 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
 ge::Status LaunchKernelCustAicpuSo(const string &kernel_name);
+ge::Status LaunchKernelCheckAicpuOp(std::vector<std::string> &aicpu_optype_list,
+std::vector<std::string> &aicpu_tf_optype_list);
+ge::Status CheckAicpuOpList(GeModelPtr ge_model);
 ge::Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info);
 ge::Status GenSessionId(uint64_t &session_id);

@@ -538,7 +538,8 @@ Status GraphManager::OptimizeSubGraphWithMultiThreads(ComputeGraphPtr compute_gr
 (void) AttrUtils::SetStr(subgraph->GetSubGraph(), ATTR_NAME_OP_COMPILE_STRATEGY, op_compile_strategy);
 }
 std::future<Status> f = executor.commit(GraphManager::ProcessSubGraphWithMultiThreads, this,
-compute_graph->GetGraphID(), subgraph, compute_graph, session_id,
+compute_graph->GetGraphID(), subgraph,
+compute_graph->GetName(), session_id,
 GetThreadLocalContext());
 if (!f.valid()) {
 GELOGE(FAILED, "Future is invalid");

@@ -553,7 +554,8 @@ Status GraphManager::OptimizeSubGraphWithMultiThreads(ComputeGraphPtr compute_gr
 (void) AttrUtils::SetStr(subgraph->GetSubGraph(), ATTR_NAME_OP_COMPILE_STRATEGY, op_compile_strategy);
 }
 std::future<Status> f = executor.commit(GraphManager::ProcessSubGraphWithMultiThreads, this,
-compute_graph->GetGraphID(), subgraph, compute_graph, session_id,
+compute_graph->GetGraphID(), subgraph,
+compute_graph->GetName(), session_id,
 GetThreadLocalContext());
 if (!f.valid()) {
 GELOGE(FAILED, "Future is invalid");

@@ -2473,7 +2475,8 @@ Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const Gra
 Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id,
 const SubGraphInfoPtr &sub_graph_info_ptr,
-const ComputeGraphPtr &compute_graph, uint64_t session_id,
+const std::string &root_graph_name,
+uint64_t session_id,
 const GEThreadLocalContext &ge_context) {
 if (sub_graph_info_ptr != nullptr && graph_manager != nullptr) {
 GetContext().SetSessionId(session_id);

@@ -2490,9 +2493,13 @@ Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager
 GELOGE(FAILED, "Failed to set attr ATTR_NAME_ROOT_GRAPH_ID for subgraph, graph_id: %u.", root_graph_id);
 return FAILED;
 }
+if (!AttrUtils::SetStr(*compute_graph_tmp, ATTR_NAME_ROOT_GRAPH_NAME, root_graph_name)) {
+GELOGE(FAILED, "Failed to set attr ATTR_NAME_ROOT_GRAPH_NAME for subgraph, \
+root_graph_name: %s.", root_graph_name.c_str());
+return FAILED;
+}
 compute_graph_tmp->SetSessionID(session_id);
 Status ret = graph_manager->GetCompilerStages(root_graph_id).optimizer.OptimizeSubGraph(compute_graph_tmp,
-compute_graph,
 engine_name);
 if (ret != SUCCESS) {
 GELOGE(ret, "SubGraph optimize Failed %s", engine_name.c_str());

@@ -219,7 +219,8 @@ class GraphManager {
 static Status ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id,
 const SubGraphInfoPtr &sub_graph_info_ptr,
-const ComputeGraphPtr &compute_graph, uint64_t session_id,
+const std::string &root_graph_name,
+uint64_t session_id,
 const GEThreadLocalContext &ge_context);
 Status ParseInputsDims(const std::vector<InputTensorInfo> &input_tensor);
 void ParseInputsDimsForData(const std::vector<InputTensorInfo> &input_tensor);

@@ -76,8 +76,7 @@ void AddNodeInputProperty(ComputeGraphPtr &compute_graph) {
 }
 }
-Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const ComputeGraphPtr &parent_graph,
-const std::string &engine_name) {
+Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const std::string &engine_name) {
 if (compute_graph == nullptr) {
 GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeSubGraph]: compute_graph is nullptr.");
 return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;

@@ -106,10 +105,6 @@ Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const Com
 for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
 Status ret = (*iter)->OptimizeFusedGraphAfterGraphSlice(*(compute_graph));
 if (ret != SUCCESS) {
-auto root_graph = ge::GraphUtils::FindRootGraph(parent_graph);
-if (root_graph != nullptr) {
-ErrorManager::GetInstance().SaveMstuneCompileFailedMsg(root_graph->GetName());
-}
 GELOGE(ret, "[OptimizeSubGraph][OptimizeFusedGraphAfterGraphSlice]: graph optimize failed, ret:%d", ret);
 return ret;
 }

@@ -42,8 +42,7 @@ class GraphOptimize {
 ~GraphOptimize() = default;
 // subgraph optimize
-Status OptimizeSubGraph(ComputeGraphPtr &compute_graph, const ComputeGraphPtr &parent_graph,
-const std::string &engine_name);
+Status OptimizeSubGraph(ComputeGraphPtr &compute_graph, const std::string &engine_name);
 // original graph optimize
 Status OptimizeOriginalGraph(ComputeGraphPtr &compute_graph);

@@ -503,12 +503,24 @@ Status MultiBatchClonePass::SetMaxShapeToData(const NodePtr &data) {
 ///
 /// @ingroup ge
-/// @brief Set shape to Data node in branch.
-/// @param [in] const NodePtr &data: data in branch.
+/// @brief Update Data node in Subgraph.
+/// @param [in] const NodePtr &data: data in Subgraph.
 /// @param [in] size_t index: The batch index.
 /// @return 0: SUCCESS / others: FAILED
 ///
-Status MultiBatchClonePass::UpdateShapeToData(const NodePtr &data, size_t index) {
+Status MultiBatchClonePass::UpdateSubgraphData(const NodePtr &data, size_t index) {
+int node_index = -1;
+if (!AttrUtils::GetInt(data->GetOpDesc(), ATTR_NAME_INDEX, node_index)) {
+GELOGE(FAILED, "Failed to get index from data[%s]", data->GetName().c_str());
+return FAILED;
+}
+int parent_index = node_index + 1;
+if (!AttrUtils::SetInt(data->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
+GELOGE(FAILED, "Failed to set parent index for node %s", data->GetName().c_str());
+return FAILED;
+}
 auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
 const auto &dims = data_shape.GetDims();
 if (std::all_of(dims.begin(), dims.end(), [](int64_t val) { return val >= 0; })) {

@@ -580,13 +592,15 @@ Status MultiBatchClonePass::CreateSubgraphs(const ComputeGraphPtr &graph, const
 subgraph->SetParentGraph(graph);
 graph->AddSubgraph(subgraph->GetName(), subgraph);
 all_branch_output_[subgraph] = subgraph->FindFirstNodeMatchType(NETOUTPUT);
+GE_CHK_STATUS_RET(UpdateSubgraphOutput(all_branch_output_[subgraph]),
+"Update %s failed", all_branch_output_[subgraph]->GetName().c_str());
 const string key_name = "branches" + std::to_string(i);
 op_desc->AddSubgraphName(key_name);
 op_desc->SetSubgraphInstanceName(i, subgraph->GetName());
 for (const auto &data : input_nodes) {
-GE_CHK_STATUS_RET(UpdateShapeToData(data, i), "Update %s failed", subgraph->GetName().c_str());
+GE_CHK_STATUS_RET(UpdateSubgraphData(data, i), "Update %s failed", subgraph->GetName().c_str());
 }
 }

@@ -595,55 +609,27 @@ Status MultiBatchClonePass::CreateSubgraphs(const ComputeGraphPtr &graph, const
 const auto &op_desc = n->GetOpDesc();
 op_desc->SetName(n->GetName() + kMultiBatchNodePostfix + "0");
 if (n->GetType() == DATA) {
-GE_CHK_STATUS_RET(UpdateShapeToData(n, 0), "Update %s failed", branch->GetName().c_str());
+GE_CHK_STATUS_RET(UpdateSubgraphData(n, 0), "Update %s failed", branch->GetName().c_str());
 }
 }
-return PostProcSubgraph(graph);
+return SUCCESS;
 }
 ///
 /// @ingroup ge
-/// @brief Assign parent index for branches.
-/// @param [in] const ComputeGraphPtr &graph: Root/Case graph.
+/// @brief Update output_node in Subgraph.
+/// @param [in] const NodePtr &output_node: output_node in Subgraph.
 /// @return 0: SUCCESS / others: FAILED
 ///
-Status MultiBatchClonePass::PostProcSubgraph(const ComputeGraphPtr &graph) {
-auto func_desc = case_node_->GetOpDesc();
-domi::ParseSubgraphFuncV2 parse_func_v2 = nullptr;
-auto post_func = domi::OpRegistry::Instance()->GetParseSubgraphPostFunc(func_desc->GetType());
-if (post_func == nullptr) {
-GELOGW("The subgraph post func for node %s type %s is null.", case_node_->GetName().c_str(),
-case_node_->GetType().c_str());
-if (domi::OpRegistry::Instance()->GetParseSubgraphPostFunc(func_desc->GetType(), parse_func_v2) != SUCCESS ||
-parse_func_v2 == nullptr) {
-GELOGW("The subgraph new post func v2 for node %s type %s is null", case_node_->GetName().c_str(),
-case_node_->GetType().c_str());
-return FAILED;
-}
-}
-for (const auto &name : func_desc->GetSubgraphInstanceNames()) {
-const auto &subgraph = graph->GetSubgraph(name);
-if (subgraph == nullptr) {
-GELOGE(FAILED, "Subgraph not found, name: %s", name.c_str());
-return FAILED;
-}
-std::string subgraph_name;
-GE_CHK_STATUS_RET(func_desc->GetSubgraphNameByInstanceName(subgraph->GetName(), subgraph_name),
-"Subgraph: %s get subgraph name failed.", subgraph->GetName().c_str());
-auto graph = GraphUtils::CreateGraphFromComputeGraph(subgraph);
-Status ret = FAILED;
-if (post_func != nullptr) {
-ret = post_func(subgraph_name, graph);
-} else if (parse_func_v2 != nullptr) {
-ret = parse_func_v2(subgraph_name.c_str(), graph);
-}
-if (ret != SUCCESS) {
-GELOGE(FAILED, "Failed to post-process subgraph %s on node %s type %s", graph.GetName().c_str(),
-case_node_->GetName().c_str(), case_node_->GetType().c_str());
+Status MultiBatchClonePass::UpdateSubgraphOutput(const NodePtr &output_node) {
+const auto &op_desc = output_node->GetOpDesc();
+GE_CHECK_NOTNULL(op_desc);
+for (size_t index = 0; index < op_desc->GetInputsSize(); ++index) {
+GeTensorDescPtr tensor = op_desc->MutableInputDesc(index);
+GE_CHECK_NOTNULL(tensor);
+if (!AttrUtils::SetInt(tensor, ATTR_NAME_PARENT_NODE_INDEX, index)) {
+GELOGE(FAILED, "Failed to set parent index for node %s", output_node->GetName().c_str());
 return FAILED;
 }
 }

@@ -105,12 +105,20 @@ class MultiBatchClonePass : public GraphPass {
 ///
 /// @ingroup ge
-/// @brief Set shape to Data node in branch.
-/// @param [in] const NodePtr &data: data in branch.
+/// @brief Update Data node in Subgraph.
+/// @param [in] const NodePtr &data: data in Subgraph.
 /// @param [in] size_t index: The batch index.
 /// @return 0: SUCCESS / others: FAILED
 ///
-Status UpdateShapeToData(const NodePtr &data, size_t index);
+Status UpdateSubgraphData(const NodePtr &data, size_t index);
+///
+/// @ingroup ge
+/// @brief Update output_node in Subgraph.
+/// @param [in] const NodePtr &output_node: output_node in Subgraph.
+/// @return 0: SUCCESS / others: FAILED
+///
+Status UpdateSubgraphOutput(const NodePtr &output_node);
 ///
 /// @ingroup ge

@@ -133,14 +141,6 @@
 ///
 /// @ingroup ge
-/// @brief Assign parent index for branches.
-/// @param [in] const ComputeGraphPtr &graph: Root/Case graph.
-/// @return 0: SUCCESS / others: FAILED
-///
-Status PostProcSubgraph(const ComputeGraphPtr &graph);
-///
-/// @ingroup ge
 /// @brief Remove subgraph supend output anchor.
 /// @param [in] ComputeGraphPtr &graph: Parent compute graph.
 /// @return 0: SUCCESS / others: FAILED

@@ -33,6 +33,8 @@ namespace {
 const int kDoubleAttrN = 2;
 const int kFirstOutputDescIdx = 0;
 const int kMergedShapeSecondDim = 1;
+const size_t kNullTensorDimNum = 1;
+const int64_t kNullTensorDimValue = 0;
 const std::set<DataType> kSupportedTypeSet = {DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
 DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE};
 } // namespace

@@ -177,7 +179,14 @@ Status DynamicStitchKernel::StitchDataFollowIndices(int64_t data_unit, const vec
 int64_t src_offset = 0;
 std::set<int32_t> indices_set;
 for (int i = 0; i < n_; i++) {
-auto indices_shape_size = input[i]->GetTensorDesc().GetShape().GetShapeSize();
+GeShape indices_shape = input[i]->GetTensorDesc().GetShape();
+size_t indices_dim_num = indices_shape.GetDimNum();
+// skip null indices tensor
+if (indices_dim_num == kNullTensorDimNum && indices_shape.GetDim(0) == kNullTensorDimValue) {
+GELOGD("Input indices[%d] has null tensor, skip it.", i);
+continue;
+}
+auto indices_shape_size = indices_shape.GetShapeSize();
 // to normalize logic, assume scalar as vector with shape of [1].
 indices_shape_size = (indices_shape_size == 0) ? 1 : indices_shape_size;
 // all index for input is less than size of input

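Put differently, the new guard treats an indices input as a "null tensor" exactly when it is rank 1 with a single dimension of 0, and skips that indices/data pair instead of folding it into the merged result. The same predicate in isolation, using the GeShape accessors called in the hunk above (the helper name is just for this sketch):

    // Minimal sketch of the check added above: a shape of [0] is considered empty.
    bool IsNullIndicesTensor(const ge::GeShape &shape) {
      return (shape.GetDimNum() == 1) && (shape.GetDim(0) == 0);
    }
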
@@ -21,6 +21,7 @@
 #include "graph/build/memory/var_mem_assign_util.h"
 #include "graph/debug/ge_attr_define.h"
 #include "graph/load/new_model_manager/model_utils.h"
+#include "graph/load/new_model_manager/model_manager.h"
 #include "graph/manager/graph_var_manager.h"
 #include "graph/manager/host_mem_manager.h"
 #include "graph/manager/trans_var_data_utils.h"

@@ -954,6 +955,7 @@ Status HybridModelBuilder::InitWeights() {
 }
 Status HybridModelBuilder::LoadTasks() {
+GE_CHK_STATUS_RET(CheckAicpuOpList(), "Check Aicpu op failed.");
 for (auto &it : hybrid_model_.node_items_) {
 auto &node_item = it.second;
 auto &node_ptr = node_item->node;

@@ -1590,5 +1592,29 @@ Status HybridModelBuilder::BuildInputMapping(GraphItem &graph_item,
 return SUCCESS;
 }
+Status HybridModelBuilder::CheckAicpuOpList() {
+std::vector<std::string> aicpu_optype_list;
+std::vector<std::string> aicpu_tf_optype_list;
+std::set<std::string> aicpu_optype_set;
+std::set<std::string> aicpu_tf_optype_set;
+for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) {
+auto &ge_model = it.second;
+GE_CHECK_NOTNULL(ge_model);
+if (ge::AttrUtils::GetListStr(*ge_model, "needCheckCpu", aicpu_optype_list)) {
+aicpu_optype_set.insert(aicpu_optype_list.begin(), aicpu_optype_list.end());
+}
+if (ge::AttrUtils::GetListStr(*ge_model, "needCheckTf", aicpu_tf_optype_list)) {
+aicpu_tf_optype_set.insert(aicpu_tf_optype_list.begin(), aicpu_tf_optype_list.end());
+}
+}
+// reset list with set
+aicpu_optype_list.assign(aicpu_optype_set.begin(), aicpu_optype_set.end());
+aicpu_tf_optype_list.assign(aicpu_tf_optype_set.begin(), aicpu_tf_optype_set.end());
+GE_CHK_STATUS_RET(ModelManager::GetInstance()->LaunchKernelCheckAicpuOp(aicpu_optype_list, aicpu_tf_optype_list),
+"Launch check aicpu op type failed.");
+return SUCCESS;
+}
 } // namespace hybrid
 } // namespace ge

@@ -78,6 +78,7 @@ class HybridModelBuilder {
 Status ParseVarOutputs(NodeItem &node_item);
 Status LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem *parent_node_item);
 Status RecoverGraphUnknownFlag();
+Status CheckAicpuOpList();
 const char* GetGraphName() const {
 return hybrid_model_.model_name_.c_str();

@@ -96,7 +96,8 @@ Status HcclNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
 op_info.root = root_id;
 auto callback = [this, op_desc](HcclResult status) {
 if (status != HCCL_SUCCESS) {
-GELOGE(HCCL_E_INTERNAL, "node %s call HcomExecEnqueueOperation failed, ret: 0x%X", op_desc->GetName().c_str(), status);
+GELOGE(HCCL_E_INTERNAL, "node %s call HcomExecEnqueueOperation failed, ret: 0x%X",
+op_desc->GetName().c_str(), status);
 }
 std::lock_guard<std::mutex> lock(this->hccl_mutex_);
 this->cond_.notify_all();

@@ -51,6 +51,7 @@ const char *const kDigitError = "is not digit";
 const char *const kCompressWeightError = "it must be appointed when appoint parameter[--optypelist_for_implmode]";
 const char *const kSelectImplmodeError = "only support high_performance, high_precision";
 const char *const kDynamicBatchSizeError = "It can only contains digit, \",\", \" \"";
+const char *const kKeepDtypeError = "file not found";
 vector<string> SplitInputShape(const std::string &input_shape) {
 vector<string> shape_pair_vec;

@@ -439,6 +440,17 @@ Status CheckCompressWeightParamValid(const std::string enable_compress_weight, c | |||||
return ge::SUCCESS; | return ge::SUCCESS; | ||||
} | } | ||||
Status CheckKeepTypeParamValid(const std::string &keep_dtype) { | |||||
if ((!keep_dtype.empty()) && (!CheckInputPathValid(keep_dtype, "--keep_dtype"))) { | |||||
ErrorManager::GetInstance().ATCReportErrMessage( | |||||
"E10001", {"parameter", "value", "reason"}, {"--keep_dtype", keep_dtype, kKeepDtypeError}); | |||||
GELOGE(ge::PARAM_INVALID, "keep dtype config file not found, file_name:%s", keep_dtype.c_str()); | |||||
return ge::PARAM_INVALID; | |||||
} | |||||
return ge::SUCCESS; | |||||
} | |||||
int CheckLogParamValidAndSetLogLevel(const std::string log) { | int CheckLogParamValidAndSetLogLevel(const std::string log) { | ||||
int ret = -1; | int ret = -1; | ||||
if (log == "default") { | if (log == "default") { | ||||
@@ -76,6 +76,7 @@ Status CheckDisableReuseMemoryParamValid(const std::string disable_reuse_memory) | |||||
Status CheckEnableSingleStreamParamValid(const std::string enable_single_stream); | Status CheckEnableSingleStreamParamValid(const std::string enable_single_stream); | ||||
Status CheckImplmodeParamValid(const std::string &optypelist_for_implmode, std::string &op_select_implmode); | Status CheckImplmodeParamValid(const std::string &optypelist_for_implmode, std::string &op_select_implmode); | ||||
Status CheckInputFormat(const string &input_format); | Status CheckInputFormat(const string &input_format); | ||||
Status CheckKeepTypeParamValid(const std::string &keep_dtype); | |||||
void PrintOptionMap(std::map<std::string, std::string> &options, std::string tips); | void PrintOptionMap(std::map<std::string, std::string> &options, std::string tips); | ||||
void EraseEndSemicolon(std::string ¶m); | void EraseEndSemicolon(std::string ¶m); | ||||
} | } | ||||
@@ -10,6 +10,7 @@ protobuf_generate(ge PROTO_SRCS PROTO_HDRS ${PROTO_LIST}) | |||||
set(SRC_LIST | set(SRC_LIST | ||||
"main.cc" | "main.cc" | ||||
"single_op_parser.cc" | "single_op_parser.cc" | ||||
"keep_dtype_option.cc" | |||||
"../session/omg.cc" | "../session/omg.cc" | ||||
"../ir_build/atc_ir_common.cc" | "../ir_build/atc_ir_common.cc" | ||||
) | ) | ||||
@@ -0,0 +1,107 @@ | |||||
/** | |||||
* Copyright 2020 Huawei Technologies Co., Ltd | |||||
* | |||||
* Licensed under the Apache License, Version 2.0 (the "License"); | |||||
* you may not use this file except in compliance with the License. | |||||
* You may obtain a copy of the License at | |||||
* | |||||
* http://www.apache.org/licenses/LICENSE-2.0 | |||||
* | |||||
* Unless required by applicable law or agreed to in writing, software | |||||
* distributed under the License is distributed on an "AS IS" BASIS, | |||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
* See the License for the specific language governing permissions and | |||||
* limitations under the License. | |||||
*/ | |||||
#include "keep_dtype_option.h" | |||||
#include <fstream> | |||||
#include <iostream> | |||||
#include <sstream> | |||||
#include <vector> | |||||
#include "graph/debug/ge_attr_define.h" | |||||
#include "framework/common/util.h" | |||||
#include "common/util/error_manager/error_manager.h" | |||||
namespace ge { | |||||
namespace { | |||||
const size_t kMaxOpsNum = 10; | |||||
} // namespace | |||||
bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name) { | |||||
std::vector<std::string> original_op_names; | |||||
if (!AttrUtils::GetListStr(op_desc, ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_op_names)) { | |||||
return false; | |||||
} | |||||
for (auto &origin_name : original_op_names) { | |||||
if (origin_name == op_name) { | |||||
return true; | |||||
} | |||||
} | |||||
return false; | |||||
} | |||||
void KeepDtypeReportError(const std::vector<std::string> &invalid_list) { | |||||
std::stringstream error_ops; | |||||
for (size_t i = 0; i < invalid_list.size(); i++) { | |||||
if (i == kMaxOpsNum) { | |||||
error_ops << "..."; | |||||
break; | |||||
} | |||||
error_ops << invalid_list[i] << " "; | |||||
} | |||||
std::string err_msg = "config file contains "; | |||||
err_msg = err_msg.append(std::to_string(invalid_list.size())) | |||||
.append(" operators not in the graph, op names:") | |||||
.append(error_ops.str()); | |||||
ErrorManager::GetInstance().ATCReportErrMessage( | |||||
"E10042", {"parameter", "reason"}, {"keep_dtype", err_msg.c_str()}); | |||||
GELOGE(FAILED, "%s", err_msg.c_str()); | |||||
} | |||||
Status DealKeepDtypeOption(const ComputeGraphPtr &graph, const std::string &keep_dtype) { | |||||
GE_CHECK_NOTNULL(graph); | |||||
if (keep_dtype.empty()) { | |||||
return SUCCESS; | |||||
} | |||||
std::string real_path = RealPath(keep_dtype.c_str()); | |||||
if (real_path.empty()) { | |||||
GELOGE(PARAM_INVALID, "Can not get real path for %s.", keep_dtype.c_str()); | |||||
return PARAM_INVALID; | |||||
} | |||||
std::ifstream ifs(real_path); | |||||
if (!ifs.is_open()) { | |||||
GELOGE(FAILED, "Open file %s failed", keep_dtype.c_str()); | |||||
return FAILED; | |||||
} | |||||
std::string op_name; | |||||
std::vector<std::string> invalid_list; | |||||
while (std::getline(ifs, op_name)) { | |||||
op_name = StringUtils::Trim(op_name); | |||||
if (op_name.empty()) { | |||||
continue; | |||||
} | |||||
bool is_find = false; | |||||
for (auto &node_ptr : graph->GetDirectNode()) { | |||||
auto op_desc = node_ptr->GetOpDesc(); | |||||
GE_CHECK_NOTNULL(op_desc); | |||||
if ((op_desc->GetName() == op_name) || IsOriginalOpFind(op_desc, op_name)) { | |||||
is_find = true; | |||||
(void)AttrUtils::SetInt(op_desc, ATTR_NAME_KEEP_DTYPE, 1); | |||||
} | |||||
} | |||||
if (!is_find) { | |||||
invalid_list.push_back(op_name); | |||||
} | |||||
} | |||||
if (!invalid_list.empty()) { | |||||
KeepDtypeReportError(invalid_list); | |||||
return PARAM_INVALID; | |||||
} | |||||
return SUCCESS; | |||||
} | |||||
} // namespace ge |
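
For context, a minimal usage sketch of the new option (illustrative only, not part of this change): the file passed via --keep_dtype lists one operator name per line; DealKeepDtypeOption marks every matching node (matched by name or by a recorded original op name) with ATTR_NAME_KEEP_DTYPE, and reports E10042 when a listed name is absent from the graph. The function name, file path, and op names below are hypothetical.

// Sketch: write a --keep_dtype config file (one op name per line) and apply it to a graph.
#include <fstream>
#include "keep_dtype_option.h"

ge::Status ApplyKeepDtypeSketch(const ge::ComputeGraphPtr &graph) {
  const std::string cfg_path = "./keep_dtype.cfg";  // hypothetical path
  std::ofstream ofs(cfg_path);
  ofs << "conv2d_1\n";      // hypothetical op names; they must exist in `graph`
  ofs << "batchnorm_1\n";
  ofs.close();
  // Matching nodes get ATTR_NAME_KEEP_DTYPE = 1; unknown names trigger
  // KeepDtypeReportError and a PARAM_INVALID return.
  return ge::DealKeepDtypeOption(graph, cfg_path);
}
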
@@ -0,0 +1,26 @@ | |||||
/** | |||||
* Copyright 2020 Huawei Technologies Co., Ltd | |||||
* | |||||
* Licensed under the Apache License, Version 2.0 (the "License"); | |||||
* you may not use this file except in compliance with the License. | |||||
* You may obtain a copy of the License at | |||||
* | |||||
* http://www.apache.org/licenses/LICENSE-2.0 | |||||
* | |||||
* Unless required by applicable law or agreed to in writing, software | |||||
* distributed under the License is distributed on an "AS IS" BASIS, | |||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
* See the License for the specific language governing permissions and | |||||
* limitations under the License. | |||||
*/ | |||||
#ifndef KEEP_DTYPE_OPTION_H_ | |||||
#define KEEP_DTYPE_OPTION_H_ | |||||
#include <string> | |||||
#include "graph/compute_graph.h" | |||||
#include "framework/common/ge_inner_error_codes.h" | |||||
namespace ge { | |||||
Status DealKeepDtypeOption(const ComputeGraphPtr &graph, const std::string &keep_dtype); | |||||
} // namespace ge | |||||
#endif // KEEP_DTYPE_OPTION_H_ |
@@ -43,6 +43,7 @@ | |||||
#include "parser/common/register_tbe.h" | #include "parser/common/register_tbe.h" | ||||
#include "register/op_registry.h" | #include "register/op_registry.h" | ||||
#include "single_op_parser.h" | #include "single_op_parser.h" | ||||
#include "keep_dtype_option.h" | |||||
using domi::BuildMode; | using domi::BuildMode; | ||||
using domi::OpRegistrationData; | using domi::OpRegistrationData; | ||||
@@ -109,6 +110,9 @@ DEFINE_string(precision_mode, "force_fp16", | |||||
"Optional; precision mode." | "Optional; precision mode." | ||||
"Support force_fp16, allow_mix_precision, allow_fp32_to_fp16, must_keep_origin_dtype."); | "Support force_fp16, allow_mix_precision, allow_fp32_to_fp16, must_keep_origin_dtype."); | ||||
DEFINE_string(keep_dtype, "", | |||||
"Optional; config file to specify the precision used by the operator during compilation."); | |||||
DEFINE_string(input_format, "", | DEFINE_string(input_format, "", | ||||
"Optional; input_format, format of input data, NCHW;NHWC." | "Optional; input_format, format of input data, NCHW;NHWC." | ||||
"Format:\"NHWC\""); | "Format:\"NHWC\""); | ||||
@@ -282,11 +286,13 @@ class GFlagUtils { | |||||
" --enable_compress_weight Enable compress weight. true: enable; false(default): disable\n" | " --enable_compress_weight Enable compress weight. true: enable; false(default): disable\n" | ||||
" --compress_weight_conf Config file to compress weight\n" | " --compress_weight_conf Config file to compress weight\n" | ||||
" --buffer_optimize Set buffer optimize. \"l2_optimize\" (default). Set \"off_optimize\" to close\n" | " --buffer_optimize Set buffer optimize. \"l2_optimize\" (default). Set \"off_optimize\" to close\n" | ||||
" --mdl_bank_path Set the path of the custom repository generated after model tuning.\n" | |||||
"\n[Operator Tuning]\n" | "\n[Operator Tuning]\n" | ||||
" --precision_mode precision mode, support force_fp16(default), allow_mix_precision, " | " --precision_mode precision mode, support force_fp16(default), allow_mix_precision, " | ||||
"allow_fp32_to_fp16, must_keep_origin_dtype.\n" | "allow_fp32_to_fp16, must_keep_origin_dtype.\n" | ||||
" --auto_tune_mode Set tune mode. E.g.: \"GA,RL\", support configure multiple, spit by ,\n" | " --auto_tune_mode Set tune mode. E.g.: \"GA,RL\", support configure multiple, spit by ,\n" | ||||
" --op_select_implmode Set op select implmode. Support high_precision, high_performance. " | |||||
" --op_bank_path Set the path of the custom repository generated after operator tuning with Auto Tune.\n" | |||||
" --op_select_implmode Set op select implmode. Support high_precision, high_performance. " | |||||
"default: high_performance\n" | "default: high_performance\n" | ||||
" --optypelist_for_implmode Appoint which op to select implmode, cooperated with op_select_implmode.\n" | " --optypelist_for_implmode Appoint which op to select implmode, cooperated with op_select_implmode.\n" | ||||
" Separate multiple nodes with commas (,). Use double quotation marks (\") " | " Separate multiple nodes with commas (,). Use double quotation marks (\") " | ||||
@@ -305,7 +311,7 @@ class GFlagUtils { | |||||
" --debug_dir Set the save path of operator compilation intermediate files.\n" | " --debug_dir Set the save path of operator compilation intermediate files.\n" | ||||
"Default value: ./kernel_meta\n" | "Default value: ./kernel_meta\n" | ||||
" --op_compiler_cache_dir Set the save path of operator compilation cache files.\n" | " --op_compiler_cache_dir Set the save path of operator compilation cache files.\n" | ||||
"Default value: $HOME/atc_data/kernel_cache\n" | |||||
"Default value: $HOME/atc_data\n" | |||||
" --op_compiler_cache_mode Set the operator compilation cache mode." | " --op_compiler_cache_mode Set the operator compilation cache mode." | ||||
"Options are disable(default), enable and force(force to refresh the cache)"); | "Options are disable(default), enable and force(force to refresh the cache)"); | ||||
@@ -421,6 +427,9 @@ class GFlagUtils { | |||||
FLAGS_enable_compress_weight, FLAGS_compress_weight_conf) == ge::SUCCESS, | FLAGS_enable_compress_weight, FLAGS_compress_weight_conf) == ge::SUCCESS, | ||||
ret = ge::FAILED, "check compress weight failed!"); | ret = ge::FAILED, "check compress weight failed!"); | ||||
GE_CHK_BOOL_EXEC(ge::CheckKeepTypeParamValid(FLAGS_keep_dtype) == ge::SUCCESS, | |||||
ret = ge::FAILED, "check keep dtype failed!"); | |||||
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( | GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( | ||||
!ge::CheckOutputPathValid(FLAGS_check_report, "--check_report"), ret = ge::FAILED, | !ge::CheckOutputPathValid(FLAGS_check_report, "--check_report"), ret = ge::FAILED, | ||||
"check_report file %s not found!!", FLAGS_check_report.c_str()); | "check_report file %s not found!!", FLAGS_check_report.c_str()); | ||||
@@ -979,6 +988,11 @@ domi::Status GenerateModel(std::map<string, string> &options, std::string output | |||||
} | } | ||||
} | } | ||||
Status ret = ge::DealKeepDtypeOption(ge::GraphUtils::GetComputeGraph(graph), FLAGS_keep_dtype); | |||||
if (ret != SUCCESS) { | |||||
return ret; | |||||
} | |||||
geRet = ge_generator.GenerateOfflineModel(graph, output, inputs); | geRet = ge_generator.GenerateOfflineModel(graph, output, inputs); | ||||
if (geRet != ge::SUCCESS) { | if (geRet != ge::SUCCESS) { | ||||
DOMI_LOGE("GE GenerateOfflineModel execute failed"); | DOMI_LOGE("GE GenerateOfflineModel execute failed"); | ||||
@@ -10,6 +10,7 @@ LOCAL_CFLAGS += -DPROTOBUF_INLINE_NOT_IN_HEADERS=0 -DCOMPILE_OMG_PACKAGE -O2 -Dg | |||||
LOCAL_SRC_FILES := \ | LOCAL_SRC_FILES := \ | ||||
main.cc \ | main.cc \ | ||||
keep_dtype_option.cc \ | |||||
single_op_parser.cc \ | single_op_parser.cc \ | ||||
../session/omg.cc \ | ../session/omg.cc \ | ||||
../ir_build/atc_ir_common.cc \ | ../ir_build/atc_ir_common.cc \ | ||||
@@ -63,6 +64,7 @@ LOCAL_CFLAGS += -DPROTOBUF_INLINE_NOT_IN_HEADERS=0 -DCOMPILE_OMG_PACKAGE -O2 -Dg | |||||
LOCAL_SRC_FILES := \ | LOCAL_SRC_FILES := \ | ||||
main.cc \ | main.cc \ | ||||
keep_dtype_option.cc \ | |||||
single_op_parser.cc \ | single_op_parser.cc \ | ||||
../session/omg.cc \ | ../session/omg.cc \ | ||||
../ir_build/atc_ir_common.cc \ | ../ir_build/atc_ir_common.cc \ | ||||
@@ -116,6 +118,7 @@ LOCAL_CFLAGS += -DPROTOBUF_INLINE_NOT_IN_HEADERS=0 -DCOMPILE_OMG_PACKAGE -O2 -Dg | |||||
LOCAL_SRC_FILES := \ | LOCAL_SRC_FILES := \ | ||||
main.cc \ | main.cc \ | ||||
keep_dtype_option.cc \ | |||||
single_op_parser.cc \ | single_op_parser.cc \ | ||||
../session/omg.cc \ | ../session/omg.cc \ | ||||
../ir_build/atc_ir_common.cc \ | ../ir_build/atc_ir_common.cc \ | ||||
@@ -30,6 +30,7 @@ enum DataType | |||||
DT_RESOURCE = 23; // resource type | DT_RESOURCE = 23; // resource type | ||||
DT_STRING_REF = 24; // string_ref type | DT_STRING_REF = 24; // string_ref type | ||||
DT_DUAL = 25; /**< dual output type */ | DT_DUAL = 25; /**< dual output type */ | ||||
DT_VARIANT = 26; // variant type | |||||
} | } | ||||
message AttrDef | message AttrDef | ||||
@@ -28,6 +28,7 @@ enum OutputDataType { | |||||
DT_RESOURCE = 23; | DT_RESOURCE = 23; | ||||
DT_STRING_REF = 24; | DT_STRING_REF = 24; | ||||
DT_DUAL = 25; | DT_DUAL = 25; | ||||
DT_VARIANT = 26; | |||||
} | } | ||||
enum OutputFormat { | enum OutputFormat { | ||||
@@ -30,6 +30,7 @@ enum DataType | |||||
DT_RESOURCE = 23; // resource type | DT_RESOURCE = 23; // resource type | ||||
DT_STRING_REF = 24; // string_ref type | DT_STRING_REF = 24; // string_ref type | ||||
DT_DUAL = 25; /**< dual output type */ | DT_DUAL = 25; /**< dual output type */ | ||||
DT_VARIANT = 26; // variant type | |||||
} | } | ||||
message AttrDef | message AttrDef | ||||
@@ -69,7 +69,7 @@ class ModelHelper { | |||||
Status GenerateGeModel(OmFileLoadHelper &om_load_helper); | Status GenerateGeModel(OmFileLoadHelper &om_load_helper); | ||||
Status GenerateGeRootModel(OmFileLoadHelper &om_load_helper); | Status GenerateGeRootModel(OmFileLoadHelper &om_load_helper); | ||||
Status LoadModelData(OmFileLoadHelper &om_load_helper); | Status LoadModelData(OmFileLoadHelper &om_load_helper); | ||||
void SetModelToGeModel(ge::Model &model); | |||||
void SetModelToGeModel(GeModelPtr &ge_model, Model &model); | |||||
Status LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); | Status LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); | ||||
Status LoadWeights(OmFileLoadHelper &om_load_helper); | Status LoadWeights(OmFileLoadHelper &om_load_helper); | ||||
Status LoadWeights(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); | Status LoadWeights(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index); | ||||
@@ -1 +1 @@ | |||||
Subproject commit 2f774bcd66b0d4b8d65b629f50148e9dd2248403 | |||||
Subproject commit c14d2be38171eed63416e71178774103faf1f5cd |
@@ -1 +1 @@ | |||||
Subproject commit 89e2455f653807f7bb3177b9b5eb096100a600db | |||||
Subproject commit 34559943b6cb645042a87d99bc88ead016b15b64 |
@@ -79,7 +79,7 @@ TEST_F(UtestFormatTransfer, get_size_by_data_type) { | |||||
EXPECT_EQ(GetSizeByDataType(DT_STRING_REF), -1); | EXPECT_EQ(GetSizeByDataType(DT_STRING_REF), -1); | ||||
EXPECT_EQ(GetSizeByDataType(DT_DUAL), 5); | EXPECT_EQ(GetSizeByDataType(DT_DUAL), 5); | ||||
EXPECT_EQ(GetSizeByDataType(DT_UNDEFINED), -1); | EXPECT_EQ(GetSizeByDataType(DT_UNDEFINED), -1); | ||||
EXPECT_EQ(DT_UNDEFINED, 26); | |||||
EXPECT_EQ(DT_UNDEFINED, 27); | |||||
} | } | ||||
} // namespace formats | } // namespace formats | ||||
} // namespace ge | } // namespace ge |
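
Since the new enumerator occupies value 26 and pushes DT_UNDEFINED to 27, a companion assertion along the following lines could pin the new value as well (a sketch assuming the ge::DataType enum gained DT_VARIANT at 26 to match the proto definitions above; it is not part of this change):

TEST_F(UtestFormatTransfer, variant_data_type_value) {
  // DT_VARIANT was inserted at 26, which is why DT_UNDEFINED moved to 27 above.
  EXPECT_EQ(DT_VARIANT, 26);
}
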