diff --git a/ge/ge_runner.mk b/ge/ge_runner.mk
index 2d0671a9..fa795ced 100644
--- a/ge/ge_runner.mk
+++ b/ge/ge_runner.mk
@@ -402,7 +402,6 @@ LOCAL_C_INCLUDES := $(RUNNER_LOCAL_C_INCLUDES)
 LOCAL_SRC_FILES := ../../out/ge/lib64/stub/ge_api.cc \
                    ../../out/ge/lib64/stub/ge_prof.cc \
-
 
 LOCAL_SHARED_LIBRARIES :=
 
 LOCAL_LDFLAGS := -lrt -ldl
diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc
index 729c47de..b8c51ad1 100755
--- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc
+++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc
@@ -825,6 +825,7 @@ Status AippOp::AddAttrToAippData(const OpDescPtr &aipp_data_op_desc) {
 }
 
 Status AippOp::AddNodeToGraph(const NodePtr &aipp_node, int64_t max_dynamic_aipp_size) {
+  static int index = 0;
   std::vector<int64_t> input_shape_dim(1, max_dynamic_aipp_size);
   GeShape input_shape(input_shape_dim);
   // construct input tensor
diff --git a/ge/graph/preprocess/multi_batch_copy_graph.cc b/ge/graph/preprocess/multi_batch_copy_graph.cc
index c0ba89f4..78c55dec 100644
--- a/ge/graph/preprocess/multi_batch_copy_graph.cc
+++ b/ge/graph/preprocess/multi_batch_copy_graph.cc
@@ -40,6 +40,7 @@
 #include "inc/pass_manager.h"
 #include "graph/common/local_context.h"
 
+using std::map;
 using std::set;
 using std::string;
 using std::vector;
@@ -263,27 +264,24 @@ Status MultiBatchGraphCopyer::Init() {
 }
 
 Status MultiBatchGraphCopyer::LabelStatus() {
-  for (const auto &data : origin_data_nodes_) {
-    auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
-    if (!IsAllDimsPositive(data_shape.GetDims())) {
-      origin_nodes_status_[data.get()] = kNodeInBatchBranch;
-    }
-  }
+  map<string, vector<NodePtr>> frame_enters;
+  InitStatus(frame_enters);
+
   bool changed = true;
   // If anyone of in node is kNodeInBatchBranch, it is also kNodeInBatchBranch
   while (changed) {
     changed = false;
     for (const auto &node : origin_all_nodes_) {
-      auto iter = origin_nodes_status_.find(node.get());
-      if (iter != origin_nodes_status_.end()) {
-        continue;
-      }
       for (auto &in_node : node->GetInAllNodes()) {
         bool is_in_batch = origin_nodes_status_.find(in_node.get()) != origin_nodes_status_.end() &&
                            origin_nodes_status_[in_node.get()] == kNodeInBatchBranch;
         if (is_in_batch) {
-          origin_nodes_status_[node.get()] = kNodeInBatchBranch;
-          changed = true;
+          if (origin_nodes_status_.find(node.get()) == origin_nodes_status_.end() ||
+              origin_nodes_status_[node.get()] != kNodeInBatchBranch) {
+            origin_nodes_status_[node.get()] = kNodeInBatchBranch;
+            ResetEnterStatus(frame_enters, node);
+            changed = true;
+          }
           break;
         }
       }
diff --git a/ge/graph/preprocess/multi_batch_copy_graph.h b/ge/graph/preprocess/multi_batch_copy_graph.h
index f8aa6ab4..edd79ada 100644
--- a/ge/graph/preprocess/multi_batch_copy_graph.h
+++ b/ge/graph/preprocess/multi_batch_copy_graph.h
@@ -69,6 +69,8 @@ class MultiBatchGraphCopyer {
 
   // label status for origin_all_nodes_
   Status LabelStatus();
+  void InitStatus(std::map<std::string, std::vector<NodePtr>> &frame_enters);
+  void ResetEnterStatus(std::map<std::string, std::vector<NodePtr>> &frame_enters, const NodePtr &node);
 
   // add nodes functions
   Status CreateNewNodes();
diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc
index aa825a4b..22a3ed86 100755
--- a/ge/session/inner_session.cc
+++ b/ge/session/inner_session.cc
@@ -1,18 +1,18 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +* Copyright 2020 Huawei Technologies Co., Ltd +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #include "session/inner_session.h" @@ -39,300 +39,300 @@ namespace { const int32_t kDumpStatus = 0; Status CheckReuseMemoryOption(const std::map &options) { - auto iter = options.find(OPTION_EXEC_DISABLE_REUSED_MEMORY); - if (iter != options.end()) { - if (iter->second == "0") { - GELOGD("%s=0, reuse memory is open", OPTION_EXEC_DISABLE_REUSED_MEMORY); - } else if (iter->second == "1") { - GELOGD("%s=1, reuse memory is close", OPTION_EXEC_DISABLE_REUSED_MEMORY); - } else { - GELOGE(PARAM_INVALID, "option %s=%s is invalid", OPTION_EXEC_DISABLE_REUSED_MEMORY, iter->second.c_str()); - return FAILED; - } - } - return SUCCESS; +auto iter = options.find(OPTION_EXEC_DISABLE_REUSED_MEMORY); +if (iter != options.end()) { +if (iter->second == "0") { +GELOGD("%s=0, reuse memory is open", OPTION_EXEC_DISABLE_REUSED_MEMORY); +} else if (iter->second == "1") { +GELOGD("%s=1, reuse memory is close", OPTION_EXEC_DISABLE_REUSED_MEMORY); +} else { +GELOGE(PARAM_INVALID, "option %s=%s is invalid", OPTION_EXEC_DISABLE_REUSED_MEMORY, iter->second.c_str()); +return FAILED; +} +} +return SUCCESS; } } static std::mutex mutex_; // BuildGraph and RunGraph use bool InnerSession::is_dump_server_inited_ = false; InnerSession::InnerSession(uint64_t session_id, const std::map &options) - : init_flag_(false), session_id_(session_id), options_(options) {} +: init_flag_(false), session_id_(session_id), options_(options) {} Status InnerSession::Initialize() { - if (init_flag_) { - GELOGW("[InnerSession:%lu] session already initialize.", session_id_); - return SUCCESS; - } +if (init_flag_) { +GELOGW("[InnerSession:%lu] session already initialize.", session_id_); +return SUCCESS; +} - // If the global options and the session options are duplicated, the session options is preferred. - auto all_options = options_; - all_options.insert(GetMutableGlobalOptions().begin(), GetMutableGlobalOptions().end()); +// If the global options and the session options are duplicated, the session options is preferred. 
+auto all_options = options_; +all_options.insert(GetMutableGlobalOptions().begin(), GetMutableGlobalOptions().end()); - Status ret = CheckReuseMemoryOption(all_options); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] check reuse memory option failed.", session_id_); - return ret; - } +Status ret = CheckReuseMemoryOption(all_options); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] check reuse memory option failed.", session_id_); +return ret; +} - UpdateThreadContext(std::map{}); +UpdateThreadContext(std::map{}); - GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId())); +GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId())); - DumpProperties dump_properties; - dump_properties.InitByOptions(); - GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "Add dump properties failed"); +DumpProperties dump_properties; +dump_properties.InitByOptions(); +GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "Add dump properties failed"); - ret = graph_manager_.Initialize(options_); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] initialize failed.", session_id_); - GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); - return ret; - } +ret = graph_manager_.Initialize(options_); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] initialize failed.", session_id_); +GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); +return ret; +} - ret = VarManager::Instance(session_id_)->SetMemoryMallocSize(all_options); - if (ret != SUCCESS) { - GELOGE(ret, "failed to set malloc size"); - (void)graph_manager_.Finalize(); - GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); - GE_CHK_RT(rtDeviceReset(static_cast(GetContext().DeviceId()))); - return ret; - } +ret = VarManager::Instance(session_id_)->SetMemoryMallocSize(all_options); +if (ret != SUCCESS) { +GELOGE(ret, "failed to set malloc size"); +(void)graph_manager_.Finalize(); +GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); +GE_CHK_RT(rtDeviceReset(static_cast(GetContext().DeviceId()))); +return ret; +} - int32_t version = static_cast(SessionVersion::ClOUD_VERSION); - const int DEFAULT_DEVICE_ID = 0; - const int DEFAULT_JOB_ID = 0; - ret = VarManager::Instance(session_id_)->Init(version, session_id_, DEFAULT_DEVICE_ID, DEFAULT_JOB_ID); - if (ret != SUCCESS) { - GELOGE(ret, "failed to init session instance"); - GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); - } - init_flag_ = true; - return SUCCESS; +int32_t version = static_cast(SessionVersion::ClOUD_VERSION); +const int DEFAULT_DEVICE_ID = 0; +const int DEFAULT_JOB_ID = 0; +ret = VarManager::Instance(session_id_)->Init(version, session_id_, DEFAULT_DEVICE_ID, DEFAULT_JOB_ID); +if (ret != SUCCESS) { +GELOGE(ret, "failed to init session instance"); +GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed"); +} +init_flag_ = true; +return SUCCESS; } Status InnerSession::Finalize() { - std::lock_guard lock(resource_mutex_); - if (!init_flag_) { - GELOGW("[InnerSession:%lu] session does not initialize.", session_id_); - return SUCCESS; - } - UpdateThreadContext(std::map{}); - Status ret = graph_manager_.Finalize(); - if (ret != SUCCESS) { - // Subsequent code execution is required, so no return is required - GELOGE(ret, "[InnerSession:%lu] finalize failed.", session_id_); - } +std::lock_guard lock(resource_mutex_); +if (!init_flag_) { +GELOGW("[InnerSession:%lu] session does not initialize.", session_id_); +return SUCCESS; +} +UpdateThreadContext(std::map{}); +Status 
ret = graph_manager_.Finalize(); +if (ret != SUCCESS) { +// Subsequent code execution is required, so no return is required +GELOGE(ret, "[InnerSession:%lu] finalize failed.", session_id_); +} - ModelManager::GetInstance()->DestroyAicpuSession(session_id_); - init_flag_ = false; - // release var memory - GELOGI("VarManager free var memory."); - (void)VarManager::Instance(session_id_)->FreeVarMemory(); - // release analyzer saved info(Session Level) - Analyzer::GetInstance()->DestroySessionJsonObject(session_id_); +ModelManager::GetInstance()->DestroyAicpuSession(session_id_); +init_flag_ = false; +// release var memory +GELOGI("VarManager free var memory."); +(void)VarManager::Instance(session_id_)->FreeVarMemory(); +// release analyzer saved info(Session Level) +Analyzer::GetInstance()->DestroySessionJsonObject(session_id_); - GE_CHK_RT(rtDeviceReset(static_cast(GetContext().DeviceId()))); - GE_CHK_STATUS_RET(RemoveDumpProperties(), "Remove dump properties failed"); +GE_CHK_RT(rtDeviceReset(static_cast(GetContext().DeviceId()))); +GE_CHK_STATUS_RET(RemoveDumpProperties(), "Remove dump properties failed"); - return ret; +return ret; } Status InnerSession::GetVariable(const std::string &name, Tensor &val) { - UpdateThreadContext(std::map{}); - return graph_manager_.GetVariable(name, val); +UpdateThreadContext(std::map{}); +return graph_manager_.GetVariable(name, val); } Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph) { - std::map options; - return AddGraph(graph_id, graph, options); +std::map options; +return AddGraph(graph_id, graph, options); } Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph, - const std::map &options) { - std::lock_guard lock(resource_mutex_); - if (!init_flag_) { - GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); - return GE_SESS_INIT_FAILED; - } - UpdateThreadContext(options); - Status ret = graph_manager_.AddGraph(graph_id, graph, options, domi::GetContext()); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] add graph %u failed.", session_id_, graph_id); - return ret; - } + const std::map &options) { +std::lock_guard lock(resource_mutex_); +if (!init_flag_) { +GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); +return GE_SESS_INIT_FAILED; +} +UpdateThreadContext(options); +Status ret = graph_manager_.AddGraph(graph_id, graph, options, domi::GetContext()); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] add graph %u failed.", session_id_, graph_id); +return ret; +} - GELOGI("[InnerSession:%lu] add graph success, graph_id=%u.", session_id_, graph_id); - return SUCCESS; +GELOGI("[InnerSession:%lu] add graph success, graph_id=%u.", session_id_, graph_id); +return SUCCESS; } Status InnerSession::RunGraph(uint32_t graph_id, const std::vector &inputs, std::vector &outputs) { - GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id); - if (mutex_.try_lock()) { - std::lock_guard lock(mutex_, std::adopt_lock); - if (!init_flag_) { - GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); - return GE_SESS_INIT_FAILED; - } - UpdateThreadContext(graph_id); - vector geInputs; - for (auto &item : inputs) { - geInputs.push_back(TensorAdapter::AsGeTensor(item)); - } - vector geOutputs; - Status ret = graph_manager_.RunGraph(graph_id, geInputs, geOutputs, session_id_); - domi::GetContext().out_nodes_map.clear(); - domi::GetContext().user_out_nodes.clear(); - if (ret != SUCCESS) { - GELOGE(ret, 
"[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); - return ret; - } - outputs.clear(); - for (auto &item : geOutputs) { - outputs.push_back(TensorAdapter::AsTensor(item)); - } +GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id); +if (mutex_.try_lock()) { +std::lock_guard lock(mutex_, std::adopt_lock); +if (!init_flag_) { +GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); +return GE_SESS_INIT_FAILED; +} +UpdateThreadContext(graph_id); +vector geInputs; +for (auto &item : inputs) { +geInputs.push_back(TensorAdapter::AsGeTensor(item)); +} +vector geOutputs; +Status ret = graph_manager_.RunGraph(graph_id, geInputs, geOutputs, session_id_); +domi::GetContext().out_nodes_map.clear(); +domi::GetContext().user_out_nodes.clear(); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); +return ret; +} +outputs.clear(); +for (auto &item : geOutputs) { +outputs.push_back(TensorAdapter::AsTensor(item)); +} - GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id); - return SUCCESS; - } else { - GELOGE(GE_SESS_ALREADY_RUNNING, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); - return GE_SESS_ALREADY_RUNNING; - } +GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id); +return SUCCESS; +} else { +GELOGE(GE_SESS_ALREADY_RUNNING, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); +return GE_SESS_ALREADY_RUNNING; +} } Status InnerSession::RemoveGraph(uint32_t graph_id) { - std::lock_guard lock(resource_mutex_); - if (!init_flag_) { - GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); - return GE_SESS_INIT_FAILED; - } - UpdateThreadContext(graph_id); - Status ret = graph_manager_.RemoveGraph(graph_id); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] remove graph failed, graph_id=%u.", session_id_, graph_id); - return ret; - } +std::lock_guard lock(resource_mutex_); +if (!init_flag_) { +GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); +return GE_SESS_INIT_FAILED; +} +UpdateThreadContext(graph_id); +Status ret = graph_manager_.RemoveGraph(graph_id); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] remove graph failed, graph_id=%u.", session_id_, graph_id); +return ret; +} - GELOGI("[InnerSession:%lu] remove graph success, graph_id=%u.", session_id_, graph_id); - return SUCCESS; +GELOGI("[InnerSession:%lu] remove graph success, graph_id=%u.", session_id_, graph_id); +return SUCCESS; } Status InnerSession::RegisterCallBackFunc( - const std::string &key, - const std::function &)> &callback) { - std::lock_guard lock(resource_mutex_); - if (!init_flag_) { - GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); - return GE_SESS_INIT_FAILED; - } - UpdateThreadContext(std::map{}); - Status ret = graph_manager_.RegisterCallBackFunc(key, callback); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] register %s callback function failed.", session_id_, key.c_str()); - return ret; - } +const std::string &key, +const std::function &)> &callback) { +std::lock_guard lock(resource_mutex_); +if (!init_flag_) { +GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_); +return GE_SESS_INIT_FAILED; +} +UpdateThreadContext(std::map{}); +Status ret = graph_manager_.RegisterCallBackFunc(key, callback); +if (ret != SUCCESS) { 
+GELOGE(ret, "[InnerSession:%lu] register %s callback function failed.", session_id_, key.c_str()); +return ret; +} - GELOGI("[InnerSession:%lu] register %s callback function success.", session_id_, key.c_str()); - return SUCCESS; +GELOGI("[InnerSession:%lu] register %s callback function success.", session_id_, key.c_str()); +return SUCCESS; } Status InnerSession::BuildGraph(uint32_t graph_id, const std::vector &inputs) { - UpdateThreadContext(graph_id); - GELOGI("[InnerSession:%lu] build graph on session, graph_id=%u.", session_id_, graph_id); - std::vector ge_inputs; - for (auto const &input : inputs) { - std::vector input_dims; - std::transform(input.dims.begin(), input.dims.end(), std::back_inserter(input_dims), - [](int64_t x) -> int64_t { return x; }); - GeShape input_shape(input_dims); - GeTensorDesc input_tensor_desc; - input_tensor_desc.SetShape(input_shape); - input_tensor_desc.SetDataType(static_cast(input.data_type)); - ge_inputs.emplace_back(input_tensor_desc); - } - GeRootModelPtr ge_root_model = nullptr; - Status ret = graph_manager_.BuildGraph(graph_id, ge_inputs, ge_root_model, session_id_, true); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] build graph failed, graph_id=%u.", session_id_, graph_id); - return ret; - } - GELOGI("[InnerSession:%lu] build graph success, graph_id=%u.", session_id_, graph_id); - return ret; +UpdateThreadContext(graph_id); +GELOGI("[InnerSession:%lu] build graph on session, graph_id=%u.", session_id_, graph_id); +std::vector ge_inputs; +for (auto const &input : inputs) { +std::vector input_dims; +std::transform(input.dims.begin(), input.dims.end(), std::back_inserter(input_dims), + [](int64_t x) -> int64_t { return x; }); +GeShape input_shape(input_dims); +GeTensorDesc input_tensor_desc; +input_tensor_desc.SetShape(input_shape); +input_tensor_desc.SetDataType(static_cast(input.data_type)); +ge_inputs.emplace_back(input_tensor_desc); +} +GeRootModelPtr ge_root_model = nullptr; +Status ret = graph_manager_.BuildGraph(graph_id, ge_inputs, ge_root_model, session_id_, true); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] build graph failed, graph_id=%u.", session_id_, graph_id); +return ret; +} +GELOGI("[InnerSession:%lu] build graph success, graph_id=%u.", session_id_, graph_id); +return ret; } Status InnerSession::RunGraphAsync(uint32_t graph_id, const std::vector &inputs, - RunAsyncCallback callback) { - UpdateThreadContext(graph_id); - GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id); - Status ret = graph_manager_.RunGraphAsync(graph_id, inputs, session_id_, callback); - if (ret != SUCCESS) { - GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); - return ret; - } - GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id); - return ret; + RunAsyncCallback callback) { +UpdateThreadContext(graph_id); +GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id); +Status ret = graph_manager_.RunGraphAsync(graph_id, inputs, session_id_, callback); +if (ret != SUCCESS) { +GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id); +return ret; +} +GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id); +return ret; } const GraphManager &InnerSession::getGraphManagerObj() const { return graph_manager_; } void InnerSession::UpdateThreadContext(const std::map &options) { - GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions()); - 
GetThreadLocalContext().SetSessionOption(options_); - GetThreadLocalContext().SetGraphOption(options); - GetContext().SetSessionId(session_id_); - SetRtSocVersion(); +GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions()); +GetThreadLocalContext().SetSessionOption(options_); +GetThreadLocalContext().SetGraphOption(options); +GetContext().SetSessionId(session_id_); +SetRtSocVersion(); } void InnerSession::UpdateThreadContext(uint32_t graph_id) { - auto options = graph_manager_.GetGraphOptions(graph_id); - if (options == nullptr) { - GELOGW("graph level options is null."); - UpdateThreadContext(std::map{}); - } else { - UpdateThreadContext(*options); - } +auto options = graph_manager_.GetGraphOptions(graph_id); +if (options == nullptr) { +GELOGW("graph level options is null."); +UpdateThreadContext(std::map{}); +} else { +UpdateThreadContext(*options); +} } bool InnerSession::IsGraphNeedRebuild(uint32_t graph_id) { - UpdateThreadContext(graph_id); - return graph_manager_.IsGraphNeedRebuild(graph_id); +UpdateThreadContext(graph_id); +return graph_manager_.IsGraphNeedRebuild(graph_id); } Status InnerSession::GetAllVariables(std::map &all_variables) { - return VarManager::Instance(session_id_)->GetAllVariables(all_variables); +return VarManager::Instance(session_id_)->GetAllVariables(all_variables); } Status InnerSession::GenCheckPointGraph(const std::map &all_variables, Graph &graph) { - return graph_manager_.GenCheckPointGraph(all_variables, graph); +return graph_manager_.GenCheckPointGraph(all_variables, graph); } Status InnerSession::SaveVariables(const Graph &graph, const std::vector &var_names, - const std::vector &outputs, std::vector &var_values) { - return graph_manager_.SaveVariables(graph, var_names, outputs, var_values); + const std::vector &outputs, std::vector &var_values) { +return graph_manager_.SaveVariables(graph, var_names, outputs, var_values); } Status InnerSession::AddDumpProperties(const DumpProperties &dump_properties) { - if (!is_dump_server_inited_) { - if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { - GE_IF_BOOL_EXEC(AdxDataDumpServerInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server init failed"); - return PARAM_INVALID) - GELOGI("Init adx data dump server success"); - is_dump_server_inited_ = true; - } - } - PropertiesManager::Instance().AddDumpProperties(session_id_, dump_properties); - return SUCCESS; +if (!is_dump_server_inited_) { +if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { +GE_IF_BOOL_EXEC(AdxDataDumpServerInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server init failed"); + return PARAM_INVALID) +GELOGI("Init adx data dump server success"); +is_dump_server_inited_ = true; +} +} +PropertiesManager::Instance().AddDumpProperties(session_id_, dump_properties); +return SUCCESS; } Status InnerSession::RemoveDumpProperties() { - PropertiesManager::Instance().RemoveDumpProperties(session_id_); - if (is_dump_server_inited_ && PropertiesManager::Instance().GetDumpPropertiesMap().empty()) { - GE_IF_BOOL_EXEC(AdxDataDumpServerUnInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server uninit failed"); - return PARAM_INVALID) - GELOGI("UnInit adx data dump server success"); - is_dump_server_inited_ = false; - } - return SUCCESS; +PropertiesManager::Instance().RemoveDumpProperties(session_id_); +if (is_dump_server_inited_ && PropertiesManager::Instance().GetDumpPropertiesMap().empty()) { +GE_IF_BOOL_EXEC(AdxDataDumpServerUnInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server 
uninit failed");
+                  return PARAM_INVALID)
+GELOGI("UnInit adx data dump server success");
+is_dump_server_inited_ = false;
+}
+return SUCCESS;
 }
 
 void InnerSession::SetRtSocVersion() {
diff --git a/inc/framework/common/ge_types.h b/inc/framework/common/ge_types.h
index 91815fc2..52fdc9ed 100644
--- a/inc/framework/common/ge_types.h
+++ b/inc/framework/common/ge_types.h
@@ -1,18 +1,18 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+* Copyright 2019-2020 Huawei Technologies Co., Ltd
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 #ifndef INC_FRAMEWORK_COMMON_GE_TYPES_H_
 #define INC_FRAMEWORK_COMMON_GE_TYPES_H_
@@ -29,30 +29,30 @@
 
 namespace ge {
 enum RuntimeType {
-  HOST = 0,
-  DEVICE = 1
+HOST = 0,
+DEVICE = 1
 };
 
 enum PerfLevel {
-  GEN_TASK_WITH_FUSION = -1,
-  GEN_TASK_WITHOUT_L2FUSION = 3,
-  GEN_TASK_WITHOUT_FUSION = 4
+GEN_TASK_WITH_FUSION = -1,
+GEN_TASK_WITHOUT_L2FUSION = 3,
+GEN_TASK_WITHOUT_FUSION = 4
 };
 
 enum FrameworkType {
-  CAFFE = 0,
-  MINDSPORE = 1,
-  TENSORFLOW = 3,
-  ANDROID_NN,
-  FRAMEWORK_RESERVED,
+CAFFE = 0,
+MINDSPORE = 1,
+TENSORFLOW = 3,
+ANDROID_NN,
+FRAMEWORK_RESERVED,
 };
 
 enum OpEngineType {
-  ENGINE_SYS = 0,  // default engine
-  ENGINE_AICORE = 1,
-  ENGINE_VECTOR = 2,
-  ENGINE_AICUBE = 3,  // not support
-  ENGINE_AIVECTOR = 4  // not support
+ENGINE_SYS = 0,  // default engine
+ENGINE_AICORE = 1,
+ENGINE_VECTOR = 2,
+ENGINE_AICUBE = 3,  // not support
+ENGINE_AIVECTOR = 4  // not support
 };
 
 enum InputAippType{
diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
index bd361f5d..7c06637f 100644
--- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
@@ -725,14 +725,14 @@ REG_OP(Conv2D)
 |           | int8    | int8    | int32   | int8     | int32
 -----------|---------|---------|---------|----------|--------
 |Format     | NCHW    | NCHW    | ND      | ND       | NCHW
-|           | NHWC    | NHWC    |         |          | NHWC
-|           |         | HWCN    |         |          |
+|           | NHWC    | HWCN    |         |          | NHWC
 @endverbatim
-* It should be noted that the data types must correspond to each other, but the
-* format does not need to . \n
-
+* Type float32 is allowed only in mixed precision (float32->float16) scenarios.
+* Mixed precision is enabled by default.
+* \n
+*
 *@par Attributes:
-* @li strides: A list of 4 integers. Specifying the strides of the
+*@li strides: Required. A list of 4 integers. Specifying the strides of the
 * convolution along the height and width. The dimension order is determined
 * by the data format of "x". By default the N and C dimensions are set to 1.
 * @li pads: A list of 4 integers. Specifying the top, bottom, left and right
 * padding.
@@ -865,8 +865,8 @@ REG_OP(Conv2DCompress)
 * "HxW(filter)" indicates the filter size after dilation.
 
 *@par Quantization supported or not
-* Yes
-
+*@li Yes
+*
 *@par Third-party framework compatibility
 *@li Compatible with the TensorFlow operator "conv2d".
 *@li Compatible with the Caffe operator 2D "Convolution".
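
Editor's note (not part of the patch): the Conv2D documentation changed above describes "strides" and "pads" as 4-integer lists whose meaning follows the data format of "x", and the revised table allows an HWCN filter with NCHW input. A minimal sketch of how these attributes are typically set through the GE operator IR follows; the include paths, the helper name BuildConv2D, and the concrete stride/pad values are illustrative assumptions, not code from this change.

```cpp
// Minimal sketch, assuming the standard GE operator IR headers are available.
#include "graph/operator.h"           // ge::Operator (assumed include path)
#include "ops/nn_calculation_ops.h"   // ge::op::Conv2D (header touched by this patch; path assumed)

ge::op::Conv2D BuildConv2D(ge::Operator &x, ge::Operator &weight) {
  return ge::op::Conv2D("conv2d")
      .set_input_x(x)
      .set_input_filter(weight)         // filter format NCHW or HWCN, per the table above
      .set_attr_strides({1, 1, 2, 2})   // 4 integers; N and C strides stay 1
      .set_attr_pads({0, 0, 0, 0})      // top, bottom, left, right padding
      .set_attr_data_format("NCHW");    // determines which positions are H and W
}
```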