@@ -118,7 +118,6 @@ set(EXECUTOR_SRC_LIST | |||
"common/profiling/profiling_manager.cc" | |||
"executor/ge_executor.cc" | |||
"ge_local_engine/engine/host_cpu_engine.cc" | |||
"graph/build/memory/var_mem_assign_util.cc" | |||
"graph/execute/graph_execute.cc" | |||
"graph/execute/model_executor.cc" | |||
"graph/load/graph_loader.cc" | |||
@@ -155,10 +154,8 @@ set(EXECUTOR_SRC_LIST | |||
"graph/load/model_manager/zero_copy_offset.cc" | |||
"graph/load/model_manager/zero_copy_task.cc" | |||
"graph/manager/graph_caching_allocator.cc" | |||
"graph/manager/graph_manager_utils.cc" | |||
"graph/manager/graph_mem_allocator.cc" | |||
"graph/manager/graph_mem_manager.cc" | |||
"graph/manager/graph_var_manager.cc" | |||
"graph/manager/host_mem_allocator.cc" | |||
"graph/manager/host_mem_manager.cc" | |||
#"graph/manager/memory_api.cc" # Just for runner. | |||
@@ -278,7 +275,6 @@ set(COMPILER_SRC_LIST | |||
"graph/build/memory/hybrid_mem_assigner.cc" | |||
"graph/build/memory/max_block_mem_assigner.cc" | |||
"graph/build/memory/memory_assigner.cc" | |||
"graph/build/memory/var_mem_assign_util.cc" | |||
"graph/build/model_builder.cc" | |||
"graph/build/run_context.cc" | |||
"graph/build/stream_allocator.cc" | |||
@@ -289,20 +285,8 @@ set(COMPILER_SRC_LIST | |||
"graph/label/label_maker.cc" | |||
"graph/label/partitioned_call_label_maker.cc" | |||
"graph/label/while_label_maker.cc" | |||
"graph/load/model_manager/model_utils.cc" | |||
"graph/manager/graph_caching_allocator.cc" | |||
"graph/manager/graph_context.cc" | |||
"graph/manager/graph_manager.cc" | |||
"graph/manager/graph_manager_utils.cc" | |||
"graph/manager/graph_mem_allocator.cc" | |||
"graph/manager/graph_mem_manager.cc" | |||
"graph/manager/graph_var_manager.cc" | |||
"graph/manager/host_mem_allocator.cc" | |||
"graph/manager/host_mem_manager.cc" | |||
"graph/manager/rdma_pool_allocator.cc" | |||
"graph/manager/session_scope_mem_allocator.cc" | |||
"graph/manager/trans_var_data_utils.cc" | |||
"graph/manager/util/debug.cc" | |||
"graph/manager/util/rt_context_util.cc" | |||
"graph/manager/util/variable_accelerate_ctrl.cc" | |||
"graph/optimize/graph_optimize.cc" | |||
@@ -594,7 +578,6 @@ target_compile_definitions(ge_compiler PRIVATE | |||
PROTOBUF_INLINE_NOT_IN_HEADERS=0 | |||
REUSE_MEMORY=1 | |||
FMK_SUPPORT_DUMP | |||
FMK_HOST_INFER | |||
google=ascend_private | |||
FUNC_VISIBILITY | |||
$<$<STREQUAL:${ENABLE_OPEN_SRC},True>:ONLY_COMPILE_OPEN_SRC> | |||
@@ -213,16 +213,17 @@ Status GEFinalize() { | |||
ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
GELOGT(TRACE_INIT, "GEFinalize start"); | |||
GELOGI("SessionManager finalization."); | |||
if (g_session_manager != nullptr) { | |||
(void)g_session_manager->Finalize(); // always succeeds.
} | |||
// call Finalize | |||
Status ret = SUCCESS; | |||
Status middle_ret; | |||
GELOGT(TRACE_RUNNING, "Finalizing environment"); | |||
std::shared_ptr<GELib> instancePtr = ge::GELib::GetInstance(); | |||
if (instancePtr == nullptr || !instancePtr->InitFlag()) { | |||
GELOGW("GEFinalize Failed: GE not initialized."); | |||
ret = GE_CLI_GE_NOT_INITIALIZED; | |||
} | |||
if (ret != GE_CLI_GE_NOT_INITIALIZED) { | |||
std::shared_ptr<GELib> instancePtr = GELib::GetInstance(); | |||
if (instancePtr != nullptr) { | |||
middle_ret = instancePtr->Finalize(); | |||
GELOGI("GEFinalize finalize gelib ret=%u", middle_ret); | |||
if (middle_ret != SUCCESS) { | |||
@@ -230,11 +231,6 @@ Status GEFinalize() { | |||
} | |||
} | |||
GELOGI("SessionManager finalization."); | |||
if (g_session_manager != nullptr) { | |||
(void)g_session_manager->Finalize(); // always succeeds.
} | |||
middle_ret = TBEPluginManager::Instance().Finalize(); | |||
if (middle_ret != SUCCESS) { | |||
ret = middle_ret; | |||
@@ -50,6 +50,9 @@ set(SRC_LIST | |||
"${GE_CODE_DIR}/ge/common/transop_util.cc" | |||
"${GE_CODE_DIR}/ge/common/types.cc" | |||
"${GE_CODE_DIR}/ge/common/util.cc" | |||
"${GE_CODE_DIR}/ge/graph/manager/graph_var_manager.cc" | |||
"${GE_CODE_DIR}/ge/graph/manager/graph_manager_utils.cc" | |||
"${GE_CODE_DIR}/ge/graph/build/memory/var_mem_assign_util.cc" | |||
) | |||
if (NOT ENABLE_D AND NOT ENABLE_ACL) | |||
@@ -0,0 +1,41 @@ | |||
/** | |||
* Copyright 2021 Huawei Technologies Co., Ltd | |||
* | |||
* Licensed under the Apache License, Version 2.0 (the "License"); | |||
* you may not use this file except in compliance with the License. | |||
* You may obtain a copy of the License at | |||
* | |||
* http://www.apache.org/licenses/LICENSE-2.0 | |||
* | |||
* Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
* See the License for the specific language governing permissions and | |||
* limitations under the License. | |||
*/ | |||
#ifndef GE_GRAPH_COMMON_MEM_MANAGER_H_ | |||
#define GE_GRAPH_COMMON_MEM_MANAGER_H_ | |||
#include <string> | |||
#include "external/ge/ge_api_types.h" | |||
#include "runtime/mem.h" | |||
namespace ge { | |||
class MemoryManager { | |||
public: | |||
virtual uint8_t *MallocMemory(rtMemType_t memory_type, const std::string &purpose, const std::string &memory_key, | |||
size_t memory_size, uint32_t device_id) = 0; | |||
virtual Status FreeMemory(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) = 0; | |||
virtual uint8_t *GetMemoryBase(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) = 0; | |||
virtual uint8_t *GetMemoryAddr(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) = 0; | |||
virtual uint8_t *GetPoolMemory(rtMemType_t memory_type, size_t memory_size, uint32_t device_id) = 0; | |||
}; | |||
} // namespace ge | |||
#endif // GE_GRAPH_COMMON_MEM_MANAGER_H_
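The header added above defines MemoryManager as a pure-virtual interface over the concrete MemManager singleton, so consumers such as VarManager (changed further down) can hold an abstract pointer instead of calling the singleton directly. The following is an illustrative, self-contained sketch of that decoupling; the stand-in typedefs and StubMemoryManager are hypothetical and are not part of this change.

// Illustrative sketch only; nothing below is added by this change.
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

using rtMemType_t = uint32_t;  // stand-in for the runtime type from runtime/mem.h
using Status = uint32_t;       // stand-in for ge::Status
constexpr Status SUCCESS = 0U;

// Trimmed copy of the interface above (two of its five methods), with a virtual
// destructor added so a stub can be deleted through the base pointer.
class MemoryManager {
 public:
  virtual ~MemoryManager() = default;
  virtual uint8_t *MallocMemory(rtMemType_t memory_type, const std::string &purpose,
                                const std::string &memory_key, size_t memory_size,
                                uint32_t device_id) = 0;
  virtual Status FreeMemory(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) = 0;
};

// Hypothetical host-side stub: backs each memory_key with a heap buffer so a consumer
// can be exercised without a device.
class StubMemoryManager : public MemoryManager {
 public:
  uint8_t *MallocMemory(rtMemType_t, const std::string &, const std::string &memory_key,
                        size_t memory_size, uint32_t) override {
    auto &block = pool_[memory_key];
    block.assign(memory_size, 0U);
    return block.data();
  }
  Status FreeMemory(rtMemType_t, const std::string &memory_key, uint32_t) override {
    pool_.erase(memory_key);
    return SUCCESS;
  }

 private:
  std::map<std::string, std::vector<uint8_t>> pool_;
};

In this change MemManager itself implements the interface and VarManager stores a raw MemoryManager*, so a host-side stub like the one above could be injected through VarManager::SetMemManager in unit tests.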
@@ -32,7 +32,6 @@ | |||
#include "graph/ge_attr_value.h" | |||
#include "graph/ge_context.h" | |||
#include "external/graph/ge_error_codes.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "graph/optimize/common/params.h" | |||
#include "external/graph/types.h" | |||
#include "graph/utils/attr_utils.h" | |||
@@ -21,6 +21,8 @@ | |||
#include "common/ge_call_wrapper.h" | |||
#include "common/local_context.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "graph/manager/graph_mem_manager.h" | |||
#include "graph/manager/host_mem_manager.h" | |||
#include "graph/utils/tensor_adapter.h" | |||
#include "graph/load/graph_loader.h" | |||
#include "graph/load/model_manager/model_manager.h" | |||
@@ -54,6 +56,26 @@ Status ModelExecutor::Initialize(const map<string, string> &options, uint64_t se | |||
return status; | |||
} | |||
GE_CHK_STATUS_RET(HostMemManager::Instance().Initialize()); | |||
const std::vector<rtMemType_t> mem_type({RT_MEMORY_HBM, RT_MEMORY_P2P_DDR}); | |||
status = MemManager::Instance().Initialize(mem_type); | |||
if (status != SUCCESS) { | |||
GELOGE(status, "[Init][MemManager] MemoryAllocatorManager initialize failed."); | |||
REPORT_CALL_ERROR("E19999", "MemManager initialize failed."); | |||
return status; | |||
} | |||
VarManager::Instance(session_id)->SetMemManager(&MemManager::Instance()); | |||
size_t total_mem_size = 0; | |||
GE_CHK_STATUS_RET_NOLOG(GetTotalMemorySize(total_mem_size)); | |||
status = VarManager::Instance(session_id)->SetMemoryMallocSize(options, total_mem_size); | |||
if (status != SUCCESS) { | |||
GELOGE(status, "[Set][MemoryMallocSize] failed."); | |||
REPORT_CALL_ERROR("E19999", "VarManager SetMemoryMallocSize failed, InnerSession:%lu.", session_id_); | |||
return status; | |||
} | |||
session_id_ = session_id; | |||
train_graph_flag_ = ParseTrainGraphFlag(); | |||
thread_run_flag_.store(true); | |||
@@ -83,10 +105,40 @@ Status ModelExecutor::Finalize() { | |||
GELOGW("Graph executor FreeExecuteMemory failed, resources may not be released correctly."); | |||
} | |||
GELOGI("VarManager free var memory."); | |||
(void)VarManager::Instance(session_id_)->FreeVarMemory(); | |||
MemManager::Instance().FreeSessionMemory(session_id_); | |||
HostMemManager::Instance().Finalize(); | |||
ModelManager::GetInstance()->DestroyAicpuSession(session_id_); | |||
return SUCCESS; | |||
} | |||
Status ModelExecutor::GetTotalMemorySize(size_t &total_mem_size) { | |||
rtError_t rt_ret = rtSetDevice(GetContext().DeviceId()); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, ret:0x%X", | |||
GetContext().DeviceId(), rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); | |||
return RT_FAILED; | |||
} | |||
size_t free_mem = 0; | |||
rt_ret = rtMemGetInfoEx(RT_MEMORYINFO_HBM, &free_mem, &total_mem_size); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtMemGetInfo failed, ret:0x%X", rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtMemGetInfo] failed, ret:0x%X", rt_ret); | |||
return RT_FAILED; | |||
} | |||
rt_ret = rtDeviceReset(GetContext().DeviceId()); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, ret:0x%X", | |||
GetContext().DeviceId(), rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); | |||
return RT_FAILED; | |||
} | |||
return SUCCESS; | |||
} | |||
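GetTotalMemorySize, moved here from VarManager (its old copy is deleted further down), brackets the rtMemGetInfoEx query between rtSetDevice and rtDeviceReset; as written, the early return on a failed query skips the reset. A hypothetical RAII guard, sketched below under the assumption that the runtime header declaring rtSetDevice/rtDeviceReset is in scope, is one way to keep the pair balanced; it is an illustration, not part of this change.

// Hypothetical helper (illustration only): pairs rtSetDevice with rtDeviceReset so
// that early returns between the two calls cannot skip the reset.
class DeviceGuard {
 public:
  explicit DeviceGuard(uint32_t device_id) : device_id_(device_id) {
    set_ok_ = (rtSetDevice(device_id_) == RT_ERROR_NONE);
  }
  ~DeviceGuard() {
    if (set_ok_) {
      (void)rtDeviceReset(device_id_);
    }
  }
  bool set_ok() const { return set_ok_; }

 private:
  uint32_t device_id_;
  bool set_ok_ = false;
};

With such a guard constructed once at the top of the function, only the query result would need explicit error handling, and the reset would still run on every path.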
// OPTION_GRAPH_RUN_MODE is supposed to be a session-level option, but it used to be set at global level in the past.
// If it cannot be parsed from the session options, fall back to the global options via GetContext().
bool ModelExecutor::ParseTrainGraphFlag() { | |||
@@ -92,6 +92,7 @@ class ModelExecutor : public Executor { | |||
private: | |||
bool ParseTrainGraphFlag(); | |||
Status GetTotalMemorySize(size_t &total_mem_size); | |||
void AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node); | |||
void RemoveGraphNode(GraphId graph_id); | |||
@@ -69,6 +69,37 @@ Status MemManager::Initialize(const std::vector<rtMemType_t> &memory_type) { | |||
return SUCCESS; | |||
} | |||
uint8_t *MemManager::MallocMemory(rtMemType_t memory_type, const std::string &purpose, const std::string &memory_key, | |||
size_t memory_size, uint32_t device_id) { | |||
return MemManager::Instance().MemInstance(memory_type).MallocMemory(purpose, memory_key, memory_size, device_id); | |||
} | |||
Status MemManager::FreeMemory(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) { | |||
return MemManager::Instance().MemInstance(memory_type).FreeMemory(memory_key, device_id); | |||
} | |||
uint8_t *MemManager::GetMemoryBase(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) { | |||
if (memory_type == RT_MEMORY_RDMA_HBM) { | |||
return MemManager::Instance().RdmaPoolInstance(RT_MEMORY_HBM).GetRdmaBaseAddr(); | |||
} | |||
return MemManager::Instance().MemInstance(memory_type).GetMemoryAddr(memory_key, device_id); | |||
} | |||
uint8_t *MemManager::GetMemoryAddr(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id) { | |||
return MemManager::Instance().MemInstance(memory_type).GetMemoryAddr(memory_key, device_id); | |||
} | |||
uint8_t *MemManager::GetPoolMemory(rtMemType_t memory_type, size_t memory_size, uint32_t device_id) { | |||
return MemManager::Instance().RdmaPoolInstance(memory_type).Malloc(memory_size, device_id); | |||
} | |||
void MemManager::FreeSessionMemory(uint64_t session_id, uint32_t device_id) { | |||
for (auto memory_type : memory_type_) { | |||
(void)SessionScopeMemInstance(memory_type).Free(session_id, device_id); | |||
} | |||
} | |||
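FreeSessionMemory folds the per-memory-type loop over SessionScopeMemInstance into MemManager itself, so session cleanup at call sites shrinks to a single call. A sketch of the before/after at a caller (mirroring the InnerSession::Finalize hunk removed later in this change), assuming session_id is in scope:

// Before: the loop previously written out at each call site.
for (auto memory_type : MemManager::Instance().GetAllMemoryType()) {
  (void)MemManager::Instance().SessionScopeMemInstance(memory_type).Free(session_id);
}

// After: one call; device_id defaults to 0.
MemManager::Instance().FreeSessionMemory(session_id);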
template <typename T> | |||
void FinalizeAllocatorMap(std::map<rtMemType_t, T *> &allocate_map) { | |||
for (auto &allocator : allocate_map) { | |||
@@ -34,22 +34,34 @@ | |||
#include "graph/manager/session_scope_mem_allocator.h" | |||
#include "graph/node.h" | |||
#include "runtime/mem.h" | |||
#include "common/mem_manager.h" | |||
namespace ge { | |||
using MemoryAllocatorPtr = std::shared_ptr<MemoryAllocator>; | |||
class MemManager { | |||
class MemManager : public MemoryManager { | |||
public: | |||
MemManager(); | |||
virtual ~MemManager(); | |||
static MemManager &Instance(); | |||
uint8_t *MallocMemory(rtMemType_t memory_type, const std::string &purpose, const std::string &memory_key, | |||
size_t memory_size, uint32_t device_id); | |||
Status FreeMemory(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id); | |||
uint8_t *GetMemoryBase(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id); | |||
uint8_t *GetMemoryAddr(rtMemType_t memory_type, const std::string &memory_key, uint32_t device_id); | |||
uint8_t *GetPoolMemory(rtMemType_t memory_type, size_t memory_size, uint32_t device_id); | |||
void FreeSessionMemory(uint64_t session_id, uint32_t device_id = 0); | |||
MemoryAllocator &MemInstance(rtMemType_t memory_type); | |||
CachingAllocator &CachingInstance(rtMemType_t memory_type); | |||
RdmaPoolAllocator &RdmaPoolInstance(rtMemType_t memory_type); | |||
HostMemAllocator &HostMemInstance(rtMemType_t memory_type); | |||
SessionScopeMemAllocator &SessionScopeMemInstance(rtMemType_t memory_type); | |||
MemManager(const MemManager &) = delete; | |||
MemManager &operator=(const MemManager &) = delete; | |||
/// | |||
/// @ingroup ge_graph | |||
/// @brief memory allocator manager init | |||
@@ -65,9 +77,10 @@ class MemManager { | |||
/// | |||
void Finalize() noexcept; | |||
const std::vector<rtMemType_t> &GetAllMemoryType() const { return memory_type_; } | |||
private: | |||
MemManager(const MemManager &) = delete; | |||
MemManager &operator=(const MemManager &) = delete; | |||
/// | |||
/// @ingroup ge_graph | |||
/// @param [in] memory_type memory type | |||
@@ -17,7 +17,6 @@ | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "graph/debug/ge_attr_define.h" | |||
#include "graph/manager/graph_mem_manager.h" | |||
#include "graph/manager/trans_var_data_utils.h" | |||
#include "graph/utils/type_utils.h" | |||
#include "graph/ge_context.h" | |||
@@ -291,7 +290,7 @@ Status HbmMemResource::AssignVarMem(const std::string &var_name, uint64_t size, | |||
} | |||
Status RdmaMemResource::AssignVarMem(const std::string &var_name, uint64_t size, uint64_t session_id, size_t &address) { | |||
uint8_t *buffer = MemManager::Instance().RdmaPoolInstance(RT_MEMORY_HBM).Malloc(size); | |||
uint8_t *buffer = VarManager::Instance(session_id)->GetPoolMemory(RT_MEMORY_HBM, size); | |||
if (buffer == nullptr) { | |||
REPORT_CALL_ERROR("E19999", "malloc rdma memory fail, var_size:%lu, var_name:%s", | |||
size, var_name.c_str()); | |||
@@ -339,8 +338,7 @@ void VarManager::Destory() { | |||
mem_resource_map_.clear(); | |||
} | |||
ge::Status VarManager::Init(const uint32_t &version, const uint64_t &session_id, const uint32_t &device_id, | |||
const uint64_t &job_id) { | |||
Status VarManager::Init(uint32_t version, uint64_t session_id, uint32_t device_id, uint64_t job_id) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
GELOGI("VarManager::Init, session id = %lu.", session_id); | |||
if (var_resource_ == nullptr) { | |||
@@ -389,21 +387,6 @@ ge::Status VarManager::SetVarAddr(const std::string &var_name, const ge::GeTenso | |||
return ge::SUCCESS; | |||
} | |||
ge::Status VarManager::SaveVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t *address, | |||
rtMemType_t memory_type) { | |||
GELOGI("VarManager::SaveVarAddr var_name = %s, data_type = %s, data_format = %s.", var_name.c_str(), | |||
ge::TypeUtils::DataTypeToSerialString(tensor_desc.GetDataType()).c_str(), | |||
ge::TypeUtils::FormatToSerialString(tensor_desc.GetFormat()).c_str()); | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (var_resource_ == nullptr) { | |||
GELOGW("VarManager has not been init."); | |||
return ge::INTERNAL_ERROR; | |||
} | |||
var_resource_->SaveVarAddr(var_name, tensor_desc, address, memory_type); | |||
return ge::SUCCESS; | |||
} | |||
ge::Status VarManager::GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr, | |||
rtMemType_t &memory_type) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
@@ -637,8 +620,19 @@ rtMemType_t VarManager::GetVarMemType(const int64_t &offset) { | |||
return var_resource_->GetVarMemType(offset); | |||
} | |||
void VarManager::SetMemManager(MemoryManager *mem_manager) { | |||
// Better to use a shared_ptr here; refactor later.
GELOGI("Set MemManager to VarManager."); | |||
mem_manager_ = mem_manager; | |||
} | |||
ge::Status VarManager::MallocVarMemory(size_t memory_size) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (mem_manager_ == nullptr) { | |||
GELOGE(FAILED, "MemManager has not been initialized.");
REPORT_INNER_ERROR("E19999", "MemManager has not been initialized, session_id: %lu", session_id_);
return FAILED; | |||
} | |||
uint8_t *var_mem_base = nullptr; | |||
string memory_key = std::to_string(session_id_); | |||
@@ -649,7 +643,7 @@ ge::Status VarManager::MallocVarMemory(size_t memory_size) { | |||
var_memory_size = (var_memory_size + kSessionMemAlignSize - 1) / kSessionMemAlignSize * kSessionMemAlignSize; | |||
const string purpose("variables and constant op memory in training network."); | |||
var_mem_base = MemManager::Instance().MemInstance(RT_MEMORY_HBM).MallocMemory(purpose, memory_key, var_memory_size); | |||
var_mem_base = mem_manager_->MallocMemory(RT_MEMORY_HBM, purpose, memory_key, var_memory_size, device_id_); | |||
if (var_mem_base == nullptr) { | |||
GELOGE(ge::INTERNAL_ERROR, "[Malloc][VarMemory] failed, size:%zu, session_id:%s", | |||
var_memory_size, memory_key.c_str()); | |||
@@ -660,20 +654,29 @@ ge::Status VarManager::MallocVarMemory(size_t memory_size) { | |||
uint8_t *VarManager::GetVarMemoryBase(rtMemType_t memory_type) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (memory_type == RT_MEMORY_RDMA_HBM) { | |||
return MemManager::Instance().RdmaPoolInstance(RT_MEMORY_HBM).GetRdmaBaseAddr(); | |||
if (mem_manager_ == nullptr) { | |||
GELOGE(FAILED, "MemManager has not been initialized.");
REPORT_INNER_ERROR("E19999", "MemManager has not been initialized, session_id: %lu", session_id_);
return nullptr; | |||
} | |||
string memory_key = std::to_string(session_id_); | |||
return MemManager::Instance().MemInstance(memory_type).GetMemoryAddr(memory_key); | |||
return mem_manager_->GetMemoryBase(memory_type, memory_key, device_id_); | |||
} | |||
uint8_t *VarManager::GetVarMemoryAddr(uint8_t *logic_addr, rtMemType_t memory_type) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (mem_manager_ == nullptr) { | |||
GELOGE(FAILED, "MemManager has not been initialized.");
REPORT_INNER_ERROR("E19999", "MemManager has not been initialized, session_id: %lu", session_id_);
return nullptr; | |||
} | |||
if (memory_type == RT_MEMORY_RDMA_HBM) { | |||
return logic_addr; | |||
} | |||
string mem_key = std::to_string(session_id_); | |||
uint8_t *mem_base = MemManager::Instance().MemInstance(memory_type).GetMemoryAddr(mem_key); | |||
uint8_t *mem_base = mem_manager_->GetMemoryAddr(memory_type, mem_key, device_id_); | |||
if (mem_base == nullptr) { | |||
return nullptr; | |||
} | |||
@@ -684,8 +687,25 @@ uint8_t *VarManager::GetVarMemoryAddr(uint8_t *logic_addr, rtMemType_t memory_ty | |||
ge::Status VarManager::FreeVarMemory() { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (mem_manager_ == nullptr) { | |||
GELOGE(FAILED, "MemManager has not been initialized.");
REPORT_INNER_ERROR("E19999", "MemManager has not been initialized, session_id: %lu", session_id_);
return FAILED; | |||
} | |||
string memory_key = std::to_string(SessionId()); | |||
return MemManager::Instance().MemInstance(RT_MEMORY_HBM).FreeMemory(memory_key); | |||
return mem_manager_->FreeMemory(RT_MEMORY_HBM, memory_key, device_id_); | |||
} | |||
uint8_t *VarManager::GetPoolMemory(rtMemType_t memory_type, size_t mem_size) { | |||
std::lock_guard<std::recursive_mutex> lock(mutex_); | |||
if (mem_manager_ == nullptr) { | |||
GELOGE(FAILED, "MemManager has not been initialized.");
REPORT_INNER_ERROR("E19999", "MemManager has not been initialized, session_id: %lu", session_id_);
return nullptr; | |||
} | |||
return mem_manager_->GetPoolMemory(memory_type, mem_size, device_id_); | |||
} | |||
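With the manager injected, the variable-memory entry points above share one shape: guard on mem_manager_, key the request by std::to_string(session_id_), and delegate to the MemoryManager interface. A hedged usage sketch of the new flow from the executor side follows (names come from this change; session_id is assumed to be in scope, and error handling is trimmed):

VarManager *vm = VarManager::Instance(session_id);
vm->SetMemManager(&MemManager::Instance());            // done once in ModelExecutor::Initialize
if (vm->MallocVarMemory() != SUCCESS) {                 // size defaults to kMemoryVarManagerMallocSize
  GELOGE(FAILED, "Malloc var memory failed, session_id:%lu.", session_id);
}
uint8_t *var_base = vm->GetVarMemoryBase(RT_MEMORY_HBM);  // resolved via mem_manager_->GetMemoryBase
(void)var_base;
(void)vm->FreeVarMemory();                              // frees the RT_MEMORY_HBM block for this session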
ge::Status VarManager::SetTransRoad(const std::string &var_name, const VarTransRoad &trans_road) { | |||
@@ -724,34 +744,7 @@ Status VarManager::GetChangedGraphId(const std::string &var_name, uint32_t &grap | |||
return var_resource_->GetChangedGraphId(var_name, graph_id); | |||
} | |||
Status VarManager::GetTotalMemorySize(size_t &total_mem_size) { | |||
rtError_t rt_ret = rtSetDevice(GetContext().DeviceId()); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, ret:0x%X", | |||
GetContext().DeviceId(), rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); | |||
return RT_FAILED; | |||
} | |||
size_t free_mem = 0; | |||
rt_ret = rtMemGetInfoEx(RT_MEMORYINFO_HBM, &free_mem, &total_mem_size); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtMemGetInfo failed, ret:0x%X", rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtMemGetInfo] failed, ret:0x%X", rt_ret); | |||
return RT_FAILED; | |||
} | |||
rt_ret = rtDeviceReset(GetContext().DeviceId()); | |||
if (rt_ret != RT_ERROR_NONE) { | |||
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, ret:0x%X", | |||
GetContext().DeviceId(), rt_ret); | |||
GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); | |||
return RT_FAILED; | |||
} | |||
return SUCCESS; | |||
} | |||
Status VarManager::SetMemoryMallocSize(const map<string, string> &options) { | |||
size_t total_mem_size = 0; | |||
GE_CHK_STATUS_RET_NOLOG(VarManager::GetTotalMemorySize(total_mem_size)); | |||
Status VarManager::SetMemoryMallocSize(const map<string, string> &options, size_t total_mem_size) { | |||
GEEVENT("Total memory size is %zu", total_mem_size); | |||
graph_mem_max_size_ = floor(total_mem_size * kGraphMemoryManagerMallocRatio); | |||
@@ -32,6 +32,7 @@ | |||
#include "graph/op_desc.h" | |||
#include "external/graph/tensor.h" | |||
#include "runtime/mem.h" | |||
#include "common/mem_manager.h" | |||
namespace ge { | |||
const size_t kGraphMemoryManagerMallocMaxSize = 26UL * 1024UL * 1024UL * 1024UL; | |||
@@ -201,39 +202,37 @@ class RdmaMemResource : public MemResource { | |||
Status AssignVarMem(const std::string &var_name, uint64_t size, uint64_t session_id, size_t &address) override; | |||
}; | |||
class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { | |||
class VarManager { | |||
public: | |||
static VarManager *Instance(uint64_t session_id); | |||
explicit VarManager(uint64_t session_id); | |||
~VarManager() = default; | |||
ge::Status Init(const uint32_t &version, const uint64_t &session_id, const uint32_t &device_id, | |||
const uint64_t &job_id); | |||
Status Init(uint32_t version, uint64_t session_id, uint32_t device_id, uint64_t job_id); | |||
void Destory(); | |||
void SetMemManager(MemoryManager *mem_manager); | |||
ge::Status AssignVarMem(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, rtMemType_t memory_type); | |||
void Destory(); | |||
ge::Status SetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t *dev_ptr, | |||
rtMemType_t memory_type); | |||
Status AssignVarMem(const std::string &var_name, const GeTensorDesc &tensor_desc, rtMemType_t memory_type); | |||
ge::Status SaveVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t *address, | |||
rtMemType_t memory_type); | |||
Status SetVarAddr(const std::string &var_name, const GeTensorDesc &tensor_desc, uint8_t *dev_ptr, | |||
rtMemType_t memory_type); | |||
ge::Status GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr, | |||
rtMemType_t &memory_type); | |||
Status GetVarAddr(const std::string &var_name, const GeTensorDesc &tensor_desc, uint8_t **dev_ptr, | |||
rtMemType_t &memory_type); | |||
ge::Status GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr); | |||
Status GetVarAddr(const std::string &var_name, const GeTensorDesc &tensor_desc, uint8_t **dev_ptr); | |||
ge::Status SaveBroadCastInfo(uint32_t graph_id, const VarBroadCastInfo &broad_cast_info); | |||
Status SaveBroadCastInfo(uint32_t graph_id, const VarBroadCastInfo &broad_cast_info); | |||
ge::Status GetCurVarDesc(const std::string &var_name, ge::GeTensorDesc &tensor_desc); | |||
Status GetCurVarDesc(const std::string &var_name, GeTensorDesc &tensor_desc); | |||
ge::Status RenewCurVarDesc(const std::string &var_name, ge::OpDescPtr op_desc); | |||
Status RenewCurVarDesc(const std::string &var_name, OpDescPtr op_desc); | |||
ge::Status MallocVarMemory(size_t memory_size = kMemoryVarManagerMallocSize); | |||
Status MallocVarMemory(size_t memory_size = kMemoryVarManagerMallocSize); | |||
ge::Status FreeVarMemory(); | |||
Status FreeVarMemory(); | |||
Status SetTransRoad(const std::string &var_name, const VarTransRoad &trans_road); | |||
@@ -243,7 +242,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { | |||
Status GetChangedGraphId(const std::string &var_name, uint32_t &graph_id); | |||
Status SetMemoryMallocSize(const std::map<string, string> &options); | |||
Status SetMemoryMallocSize(const std::map<string, string> &options, size_t total_mem_size); | |||
const size_t &GetGraphMemoryMaxSize() const { return graph_mem_max_size_; } | |||
@@ -281,6 +280,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { | |||
uint8_t *GetVarMemoryAddr(uint8_t *logic_addr, rtMemType_t memory_type); | |||
uint8_t *GetPoolMemory(rtMemType_t memory_type, size_t mem_size); | |||
Status GetAllVariables(std::map<std::string, GeTensorDesc> &all_variables); | |||
private: | |||
@@ -295,6 +296,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { | |||
std::unique_ptr<ge::VarResource> var_resource_; | |||
map<rtMemType_t, MemResource *> mem_resource_map_; | |||
mutable std::recursive_mutex mutex_; | |||
MemoryManager *mem_manager_{nullptr}; | |||
Status ParseMemoryMallocSize(std::string &memory_size, size_t &my_size); | |||
Status GetTotalMemorySize(size_t &total_mem_size); | |||
@@ -37,8 +37,6 @@ | |||
#include "common/ge_call_wrapper.h" | |||
#include "graph/ge_context.h" | |||
#include "graph/ge_global_options.h" | |||
#include "graph/manager/graph_mem_manager.h" | |||
#include "graph/manager/host_mem_manager.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "runtime/kernel.h" | |||
#include "opskernel_manager/ops_kernel_builder_manager.h" | |||
@@ -342,18 +340,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status GELib::InitSystemWithOpt | |||
GELOGW("System init with options is already inited and not shutdown."); | |||
return SUCCESS); | |||
std::vector<rtMemType_t> mem_type; | |||
mem_type.push_back(RT_MEMORY_HBM); | |||
mem_type.push_back(RT_MEMORY_P2P_DDR); | |||
Status initMmStatus = MemManager::Instance().Initialize(mem_type); | |||
if (initMmStatus != SUCCESS) { | |||
GELOGE(initMmStatus, "[Init][MemManager] MemoryAllocatorManager initialize failed."); | |||
REPORT_CALL_ERROR("E19999", "MemManager initialize failed."); | |||
return initMmStatus; | |||
} | |||
GE_CHK_STATUS_RET(HostMemManager::Instance().Initialize()); | |||
// set device id | |||
GELOGI("set logical device id:%u", options.device_id); | |||
GetContext().SetCtxDeviceId(static_cast<uint32_t>(options.device_id)); | |||
@@ -390,17 +376,6 @@ Status GELib::SystemShutdownWithOptions(const Options &options) { | |||
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status GELib::InitSystemWithoutOptions() { | |||
GELOGI("Inference Init GELib begin."); | |||
std::vector<rtMemType_t> mem_type; | |||
mem_type.push_back(RT_MEMORY_HBM); | |||
mem_type.push_back(RT_MEMORY_P2P_DDR); | |||
Status initMmStatus = MemManager::Instance().Initialize(mem_type); | |||
if (initMmStatus != SUCCESS) { | |||
GELOGE(initMmStatus, "[Init][MemoryManager] initialize failed."); | |||
REPORT_CALL_ERROR("E19999", "MemManager initialize failed."); | |||
return initMmStatus; | |||
} | |||
GE_CHK_STATUS_RET(HostMemManager::Instance().Initialize()); | |||
static bool is_inited = false; | |||
if (is_inited) { | |||
GELOGW("System init without options is already inited, don't need to init again."); | |||
@@ -451,12 +426,6 @@ Status GELib::Finalize() { | |||
GELOGI("VarManagerPool finalization."); | |||
VarManagerPool::Instance().Destory(); | |||
GELOGI("MemManager finalization."); | |||
MemManager::Instance().Finalize(); | |||
GELOGI("HostMemManager finalization."); | |||
HostMemManager::Instance().Finalize(); | |||
GELOGI("HostCpuEngine finalization."); | |||
HostCpuEngine::GetInstance().Finalize(); | |||
@@ -513,8 +482,7 @@ void GELib::RollbackInit() { | |||
if (opsManager_.init_flag_) { | |||
(void)opsManager_.Finalize(); | |||
} | |||
MemManager::Instance().Finalize(); | |||
HostMemManager::Instance().Finalize(); | |||
VarManagerPool::Instance().Destory(); | |||
} | |||
@@ -31,7 +31,6 @@ | |||
#include "graph/ge_local_context.h" | |||
#include "common/local_context.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "graph/manager/graph_mem_manager.h" | |||
#include "graph/utils/tensor_adapter.h" | |||
#include "runtime/mem.h" | |||
#include "ir_build/option_utils.h" | |||
@@ -132,16 +131,6 @@ Status InnerSession::Initialize() { | |||
return ret; | |||
} | |||
ret = VarManager::Instance(session_id_)->SetMemoryMallocSize(all_options); | |||
if (ret != SUCCESS) { | |||
GELOGE(ret, "[Set][MemoryMallocSize] failed."); | |||
REPORT_CALL_ERROR("E19999", "VarManager SetMemoryMallocSize failed, InnerSession:%lu.", session_id_); | |||
(void)InnerFinalize(); | |||
GE_CHK_STATUS(RemoveDumpProperties(), "[Remove][DumpProperties] failed."); | |||
GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId()))); | |||
return ret; | |||
} | |||
int32_t version = static_cast<int32_t>(SessionVersion::ClOUD_VERSION); | |||
const int DEFAULT_DEVICE_ID = 0; | |||
const int DEFAULT_JOB_ID = 0; | |||
@@ -170,13 +159,6 @@ Status InnerSession::Finalize() { | |||
} | |||
init_flag_ = false; | |||
// release var memory | |||
GELOGI("VarManager free var memory."); | |||
(void)VarManager::Instance(session_id_)->FreeVarMemory(); | |||
for (auto memory_type : MemManager::Instance().GetAllMemoryType()) { | |||
(void)MemManager::Instance().SessionScopeMemInstance(memory_type).Free(session_id_); | |||
} | |||
// release analyzer saved info(Session Level) | |||
Analyzer::GetInstance()->DestroySessionJsonObject(session_id_); | |||
@@ -530,7 +530,7 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES | |||
"graph/load/memcpy_addr_async_task_info_unittest.cc" | |||
"graph/load/memcpy_async_task_info_unittest.cc" | |||
"graph/load/cpu_queue_schedule_unittest.cc" | |||
"graph/ge_executor_unittest.cc" | |||
"executor/ge_executor_unittest.cc" | |||
"graph/load/model_helper_unittest.cc" | |||
"graph/load/model_utils_unittest.cc" | |||
) | |||
@@ -702,10 +702,6 @@ set(GENERATOR_TEST_FILES | |||
"generator/ge_generator_unittest.cc" | |||
) | |||
set(EXECUTOR_TEST_FILES | |||
"executor/ge_executor_unittest.cc" | |||
) | |||
set(SINGLE_OP_TEST_FILES | |||
"single_op/single_op_model_unittest.cc" | |||
"single_op/single_op_manager_unittest.cc" | |||
@@ -1008,7 +1004,6 @@ target_link_libraries(ut_libge_kernel_utest | |||
add_executable(ut_libge_distinct_load_utest | |||
${COMMON_TEST_FILES} | |||
${GENERATOR_TEST_FILES} | |||
${EXECUTOR_TEST_FILES} | |||
${DISTINCT_GRAPH_LOAD_TEST_FILES} | |||
${SINGLE_OP_TEST_FILES} | |||
${PROFILING_MNG_TEST_FILES} | |||
@@ -15,22 +15,103 @@ | |||
*/ | |||
#include <gtest/gtest.h> | |||
#include <memory> | |||
#include "common/ge_inner_error_codes.h" | |||
#include "common/types.h" | |||
#include "common/util.h" | |||
#include "runtime/mem.h" | |||
#include "common/util.h" | |||
#include "omg/omg_inner_types.h" | |||
#define private public | |||
#define protected public | |||
#include "executor/ge_executor.h" | |||
#include "graph/utils/tensor_utils.h" | |||
using namespace std; | |||
#include "common/auth/file_saver.h" | |||
#include "common/debug/log.h" | |||
#include "common/properties_manager.h" | |||
#include "common/types.h" | |||
#include "graph/load/graph_loader.h" | |||
#include "graph/load/model_manager/davinci_model.h" | |||
#include "hybrid/hybrid_davinci_model.h" | |||
#include "graph/load/model_manager/model_manager.h" | |||
#include "graph/load/model_manager/task_info/kernel_task_info.h" | |||
#include "graph/load/model_manager/task_info/kernel_ex_task_info.h" | |||
#include "graph/execute/graph_execute.h" | |||
#include "ge/common/dump/dump_properties.h" | |||
#include "graph/manager/graph_mem_allocator.h" | |||
#include "graph/utils/graph_utils.h" | |||
#include "proto/ge_ir.pb.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#undef private | |||
#undef protected | |||
using namespace std; | |||
namespace ge { | |||
class UtestGeExecutor : public testing::Test { | |||
protected: | |||
void SetUp() {} | |||
static void InitModelDefault(ge::Model &model) { | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, 0); | |||
ge::AttrUtils::SetStr(&model, ATTR_MODEL_TARGET_TYPE, "MINI"); // domi::MINI | |||
auto compute_graph = std::make_shared<ge::ComputeGraph>("graph"); | |||
auto graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph); | |||
model.SetGraph(graph); | |||
} | |||
void TearDown() {} | |||
void SetUp() { | |||
unsetenv("FMK_SYSMODE"); | |||
unsetenv("FMK_DUMP_PATH"); | |||
unsetenv("FMK_USE_FUSION"); | |||
unsetenv("DAVINCI_TIMESTAT_ENABLE"); | |||
} | |||
}; | |||
class DModelListener : public ge::ModelListener { | |||
public: | |||
DModelListener() { | |||
}; | |||
Status OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t resultCode, | |||
std::vector<ge::Tensor> &outputs) { | |||
GELOGI("In Call back. OnComputeDone"); | |||
return SUCCESS; | |||
} | |||
}; | |||
shared_ptr<ge::ModelListener> g_label_call_back(new DModelListener()); | |||
static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") { | |||
auto op_desc = std::make_shared<ge::OpDesc>(name, type); | |||
op_desc->SetStreamId(0); | |||
op_desc->SetId(0); | |||
ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_ALPHA, 0); | |||
ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_BETA, 0); | |||
op_desc->SetWorkspace({}); | |||
; | |||
op_desc->SetWorkspaceBytes({}); | |||
op_desc->SetInputOffset({}); | |||
op_desc->SetOutputOffset({}); | |||
ge::AttrUtils::SetListStr(op_desc, ge::ATTR_NAME_WEIGHT_NAME, {}); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_PAD_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_DATA_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_CEIL_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_NAN_OPT, 0); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_PAD, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1}); | |||
ge::AttrUtils::SetInt(op_desc, ge::ATTR_NAME_STREAM_SWITCH_COND, 0); | |||
return op_desc; | |||
} | |||
TEST_F(UtestGeExecutor, test_single_op_exec) { | |||
GeExecutor exeutor; | |||
ModelData model_data; | |||
@@ -45,4 +126,219 @@ TEST_F(UtestGeExecutor, test_ge_initialize) { | |||
EXPECT_EQ(executor.Initialize(), SUCCESS); | |||
EXPECT_EQ(executor.Initialize(), SUCCESS); | |||
} | |||
} // namespace ge | |||
TEST_F(UtestGeExecutor, load_data_from_file) { | |||
GeExecutor ge_executor; | |||
ge_executor.isInit_ = true; | |||
string test_smap = "/tmp/" + std::to_string(getpid()) + "_maps"; | |||
string self_smap = "/proc/" + std::to_string(getpid()) + "/maps"; | |||
string copy_smap = "cp -f " + self_smap + " " + test_smap; | |||
EXPECT_EQ(system(copy_smap.c_str()), 0); | |||
ModelData model_data; | |||
EXPECT_EQ(ge_executor.LoadDataFromFile(test_smap, model_data), SUCCESS); | |||
EXPECT_NE(model_data.model_data, nullptr); | |||
delete[] static_cast<char *>(model_data.model_data); | |||
model_data.model_data = nullptr; | |||
ge_executor.isInit_ = false; | |||
} | |||
TEST_F(UtestGeExecutor, InitFeatureMapAndP2PMem_failed) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.is_feature_map_mem_has_inited_ = true; | |||
EXPECT_EQ(model.InitFeatureMapAndP2PMem(nullptr, 0), PARAM_INVALID); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_InitDumpArgs) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelTaskInfo kernel_task_info; | |||
kernel_task_info.davinci_model_ = &model; | |||
kernel_task_info.op_desc_ = op_desc; | |||
kernel_task_info.InitDumpArgs(0); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_ex_InitDumpArgs) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelExTaskInfo kernel_ex_task_info; | |||
kernel_ex_task_info.davinci_model_ = &model; | |||
kernel_ex_task_info.InitDumpArgs(nullptr, op_desc); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_ex_InitDumpFlag) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelExTaskInfo kernel_ex_task_info; | |||
kernel_ex_task_info.davinci_model_ = &model; | |||
kernel_ex_task_info.InitDumpFlag(op_desc); | |||
} | |||
TEST_F(UtestGeExecutor, execute_graph_with_stream) { | |||
VarManager::Instance(0)->Init(0, 0, 0, 0); | |||
map<string, string> options; | |||
options[GRAPH_MEMORY_MAX_SIZE] = "1048576"; | |||
VarManager::Instance(0)->SetMemoryMallocSize(options, 1024UL * 1024UL * 1024UL); | |||
DavinciModel model(0, nullptr); | |||
ComputeGraphPtr graph = make_shared<ComputeGraph>("default"); | |||
GeModelPtr ge_model = make_shared<GeModel>(); | |||
ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph)); | |||
AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 10240); | |||
AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1); | |||
shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>(); | |||
ge_model->SetModelTaskDef(model_task_def); | |||
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); | |||
TensorUtils::SetSize(tensor, 512); | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("data", DATA); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({1024}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 0 | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("square", "Square"); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({1024}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 1 | |||
domi::TaskDef *task_def = model_task_def->add_task(); | |||
task_def->set_stream_id(0); | |||
task_def->set_type(RT_MODEL_TASK_KERNEL); | |||
domi::KernelDef *kernel_def = task_def->mutable_kernel(); | |||
kernel_def->set_stub_func("stub_func"); | |||
kernel_def->set_args_size(64); | |||
string args(64, '1'); | |||
kernel_def->set_args(args.data(), 64); | |||
domi::KernelContext *context = kernel_def->mutable_context(); | |||
context->set_op_index(op_desc->GetId()); | |||
context->set_kernel_type(2); // ccKernelType::TE | |||
uint16_t args_offset[9] = {0}; | |||
context->set_args_offset(args_offset, 9 * sizeof(uint16_t)); | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("memcpy", MEMCPYASYNC); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({5120}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 2 | |||
domi::TaskDef *task_def = model_task_def->add_task(); | |||
task_def->set_stream_id(0); | |||
task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC); | |||
domi::MemcpyAsyncDef *memcpy_async = task_def->mutable_memcpy_async(); | |||
memcpy_async->set_src(1024); | |||
memcpy_async->set_dst(5120); | |||
memcpy_async->set_dst_max(512); | |||
memcpy_async->set_count(1); | |||
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE); | |||
memcpy_async->set_op_index(op_desc->GetId()); | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("output", NETOUTPUT); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->SetInputOffset({5120}); | |||
op_desc->SetSrcName( { "memcpy" } ); | |||
op_desc->SetSrcIndex( { 0 } ); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 3 | |||
} | |||
EXPECT_EQ(model.Assign(ge_model), SUCCESS); | |||
EXPECT_EQ(model.Init(), SUCCESS); | |||
EXPECT_EQ(model.input_addrs_list_.size(), 1); | |||
EXPECT_EQ(model.output_addrs_list_.size(), 1); | |||
EXPECT_EQ(model.task_list_.size(), 2); | |||
OutputData output_data; | |||
vector<Tensor> outputs; | |||
EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS); | |||
GraphExecutor graph_executer; | |||
graph_executer.init_flag_ = true; | |||
GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph); | |||
std::vector<GeTensor> input_tensor; | |||
std::vector<GeTensor> output_tensor; | |||
std::vector<InputOutputDescInfo> output_desc; | |||
InputOutputDescInfo desc0; | |||
output_desc.push_back(desc0); | |||
graph_executer.ExecuteGraphWithStream(0, nullptr, ge_root_model, input_tensor, output_tensor); | |||
} | |||
TEST_F(UtestGeExecutor, get_op_attr) { | |||
shared_ptr<DavinciModel> model = MakeShared<DavinciModel>(1, g_label_call_back); | |||
model->SetId(1); | |||
model->om_name_ = "testom"; | |||
model->name_ = "test"; | |||
shared_ptr<hybrid::HybridDavinciModel> hybrid_model = MakeShared<hybrid::HybridDavinciModel>(); | |||
model->SetId(2); | |||
model->om_name_ = "testom_hybrid"; | |||
model->name_ = "test_hybrid"; | |||
std::shared_ptr<ModelManager> model_manager = ModelManager::GetInstance(); | |||
model_manager->InsertModel(1, model); | |||
model_manager->InsertModel(2, hybrid_model); | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::vector<std::string> value{"test"}; | |||
ge::AttrUtils::SetListStr(op_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, value); | |||
model->SaveSpecifyAttrValues(op_desc); | |||
GeExecutor ge_executor; | |||
GeExecutor::isInit_ = true; | |||
std::string attr_value; | |||
auto ret = ge_executor.GetOpAttr(1, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, SUCCESS); | |||
EXPECT_EQ(attr_value, "[4]test"); | |||
ret = ge_executor.GetOpAttr(2, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, PARAM_INVALID); | |||
ret = ge_executor.GetOpAttr(3, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID); | |||
} | |||
}
@@ -62,6 +62,13 @@ static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string | |||
return graph.AddNode(op_desc); | |||
} | |||
TEST_F(UtestModelExecutorTest, test_get_total_memory_size) { | |||
ModelExecutor model_executor; | |||
size_t total_mem_size = 0; | |||
EXPECT_EQ(model_executor.GetTotalMemorySize(total_mem_size), SUCCESS); | |||
EXPECT_EQ(total_mem_size, 1024UL * 1024UL * 1024UL); | |||
} | |||
TEST_F(UtestModelExecutorTest, test_load_graph_sync) { | |||
ModelExecutor model_executor; | |||
EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); | |||
@@ -1,349 +0,0 @@ | |||
/** | |||
* Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
* | |||
* Licensed under the Apache License, Version 2.0 (the "License"); | |||
* you may not use this file except in compliance with the License. | |||
* You may obtain a copy of the License at | |||
* | |||
* http://www.apache.org/licenses/LICENSE-2.0 | |||
* | |||
* Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
* See the License for the specific language governing permissions and | |||
* limitations under the License. | |||
*/ | |||
#include <gtest/gtest.h> | |||
#include <memory> | |||
#include "common/ge_inner_error_codes.h" | |||
#include "common/types.h" | |||
#include "common/util.h" | |||
#include "runtime/mem.h" | |||
#include "common/util.h" | |||
#include "omg/omg_inner_types.h" | |||
#define private public | |||
#define protected public | |||
#include "executor/ge_executor.h" | |||
#include "common/auth/file_saver.h" | |||
#include "common/debug/log.h" | |||
#include "common/properties_manager.h" | |||
#include "common/types.h" | |||
#include "graph/load/graph_loader.h" | |||
#include "graph/load/model_manager/davinci_model.h" | |||
#include "hybrid/hybrid_davinci_model.h" | |||
#include "graph/load/model_manager/model_manager.h" | |||
#include "graph/load/model_manager/task_info/kernel_task_info.h" | |||
#include "graph/load/model_manager/task_info/kernel_ex_task_info.h" | |||
#include "graph/execute/graph_execute.h" | |||
#include "ge/common/dump/dump_properties.h" | |||
#include "graph/manager/graph_mem_allocator.h" | |||
#include "graph/utils/graph_utils.h" | |||
#include "proto/ge_ir.pb.h" | |||
#include "graph/manager/graph_var_manager.h" | |||
#undef private | |||
#undef protected | |||
using namespace std; | |||
namespace ge { | |||
class UtestGeExecutor : public testing::Test { | |||
protected: | |||
static void InitModelDefault(ge::Model &model) { | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, 0); | |||
ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, 0); | |||
ge::AttrUtils::SetStr(&model, ATTR_MODEL_TARGET_TYPE, "MINI"); // domi::MINI | |||
auto compute_graph = std::make_shared<ge::ComputeGraph>("graph"); | |||
auto graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph); | |||
model.SetGraph(graph); | |||
} | |||
void SetUp() { | |||
unsetenv("FMK_SYSMODE"); | |||
unsetenv("FMK_DUMP_PATH"); | |||
unsetenv("FMK_USE_FUSION"); | |||
unsetenv("DAVINCI_TIMESTAT_ENABLE"); | |||
} | |||
}; | |||
class DModelListener : public ge::ModelListener { | |||
public: | |||
DModelListener() { | |||
}; | |||
Status OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t resultCode, | |||
std::vector<ge::Tensor> &outputs) { | |||
GELOGI("In Call back. OnComputeDone"); | |||
return SUCCESS; | |||
} | |||
}; | |||
shared_ptr<ge::ModelListener> g_label_call_back(new DModelListener()); | |||
static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") { | |||
auto op_desc = std::make_shared<ge::OpDesc>(name, type); | |||
op_desc->SetStreamId(0); | |||
op_desc->SetId(0); | |||
ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_ALPHA, 0); | |||
ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_BETA, 0); | |||
op_desc->SetWorkspace({}); | |||
; | |||
op_desc->SetWorkspaceBytes({}); | |||
op_desc->SetInputOffset({}); | |||
op_desc->SetOutputOffset({}); | |||
ge::AttrUtils::SetListStr(op_desc, ge::ATTR_NAME_WEIGHT_NAME, {}); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_PAD_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_DATA_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_CEIL_MODE, 0); | |||
ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_NAN_OPT, 0); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_PAD, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, {}); | |||
ge::AttrUtils::SetListInt(op_desc, ge::ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1}); | |||
ge::AttrUtils::SetInt(op_desc, ge::ATTR_NAME_STREAM_SWITCH_COND, 0); | |||
return op_desc; | |||
} | |||
TEST_F(UtestGeExecutor, load_data_from_file) { | |||
GeExecutor ge_executor; | |||
ge_executor.isInit_ = true; | |||
string test_smap = "/tmp/" + std::to_string(getpid()) + "_maps"; | |||
string self_smap = "/proc/" + std::to_string(getpid()) + "/maps"; | |||
string copy_smap = "cp -f " + self_smap + " " + test_smap; | |||
EXPECT_EQ(system(copy_smap.c_str()), 0); | |||
ModelData model_data; | |||
EXPECT_EQ(ge_executor.LoadDataFromFile(test_smap, model_data), SUCCESS); | |||
EXPECT_NE(model_data.model_data, nullptr); | |||
delete[] static_cast<char *>(model_data.model_data); | |||
model_data.model_data = nullptr; | |||
ge_executor.isInit_ = false; | |||
} | |||
/* | |||
TEST_F(UtestGeExecutor, fail_UnloadModel_model_manager_stop_unload_error) { | |||
uint32_t model_id = 1; | |||
ge::GeExecutor ge_executor; | |||
ge_executor.isInit_ = true; | |||
ge::Status ret = ge_executor.UnloadModel(model_id); | |||
EXPECT_EQ(ge::PARAM_INVALID, ret); | |||
ge_executor.isInit_ = false; | |||
ret = ge_executor.UnloadModel(model_id); | |||
EXPECT_EQ(ge::GE_EXEC_NOT_INIT, ret); | |||
} | |||
TEST_F(UtestGeExecutor, fail_CommandHandle_model_manager_HandleCommand_error) { | |||
ge::Command cmd; | |||
ge::GeExecutor ge_executor; | |||
ge::Status ret = ge_executor.CommandHandle(cmd); | |||
EXPECT_EQ(ge::PARAM_INVALID, ret); | |||
} | |||
*/ | |||
TEST_F(UtestGeExecutor, InitFeatureMapAndP2PMem_failed) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.is_feature_map_mem_has_inited_ = true; | |||
EXPECT_EQ(model.InitFeatureMapAndP2PMem(nullptr, 0), PARAM_INVALID); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_InitDumpArgs) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelTaskInfo kernel_task_info; | |||
kernel_task_info.davinci_model_ = &model; | |||
kernel_task_info.op_desc_ = op_desc; | |||
kernel_task_info.InitDumpArgs(0); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_ex_InitDumpArgs) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelExTaskInfo kernel_ex_task_info; | |||
kernel_ex_task_info.davinci_model_ = &model; | |||
kernel_ex_task_info.InitDumpArgs(nullptr, op_desc); | |||
} | |||
TEST_F(UtestGeExecutor, kernel_ex_InitDumpFlag) { | |||
DavinciModel model(0, g_label_call_back); | |||
model.om_name_ = "testom"; | |||
model.name_ = "test"; | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::map<std::string, std::set<std::string>> model_dump_properties_map; | |||
std::set<std::string> s; | |||
model_dump_properties_map[DUMP_ALL_MODEL] = s; | |||
DumpProperties dp; | |||
dp.model_dump_properties_map_ = model_dump_properties_map; | |||
model.SetDumpProperties(dp); | |||
KernelExTaskInfo kernel_ex_task_info; | |||
kernel_ex_task_info.davinci_model_ = &model; | |||
kernel_ex_task_info.InitDumpFlag(op_desc); | |||
} | |||
TEST_F(UtestGeExecutor, execute_graph_with_stream) { | |||
VarManager::Instance(0)->Init(0, 0, 0, 0); | |||
map<string, string> options; | |||
options[GRAPH_MEMORY_MAX_SIZE] = "1048576"; | |||
VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
DavinciModel model(0, nullptr); | |||
ComputeGraphPtr graph = make_shared<ComputeGraph>("default"); | |||
GeModelPtr ge_model = make_shared<GeModel>(); | |||
ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph)); | |||
AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 10240); | |||
AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1); | |||
shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>(); | |||
ge_model->SetModelTaskDef(model_task_def); | |||
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); | |||
TensorUtils::SetSize(tensor, 512); | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("data", DATA); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({1024}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 0 | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("square", "Square"); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({1024}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 1 | |||
domi::TaskDef *task_def = model_task_def->add_task(); | |||
task_def->set_stream_id(0); | |||
task_def->set_type(RT_MODEL_TASK_KERNEL); | |||
domi::KernelDef *kernel_def = task_def->mutable_kernel(); | |||
kernel_def->set_stub_func("stub_func"); | |||
kernel_def->set_args_size(64); | |||
string args(64, '1'); | |||
kernel_def->set_args(args.data(), 64); | |||
domi::KernelContext *context = kernel_def->mutable_context(); | |||
context->set_op_index(op_desc->GetId()); | |||
context->set_kernel_type(2); // ccKernelType::TE | |||
uint16_t args_offset[9] = {0}; | |||
context->set_args_offset(args_offset, 9 * sizeof(uint16_t)); | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("memcpy", MEMCPYASYNC); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->AddOutputDesc(tensor); | |||
op_desc->SetInputOffset({1024}); | |||
op_desc->SetOutputOffset({5120}); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 2 | |||
domi::TaskDef *task_def = model_task_def->add_task(); | |||
task_def->set_stream_id(0); | |||
task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC); | |||
domi::MemcpyAsyncDef *memcpy_async = task_def->mutable_memcpy_async(); | |||
memcpy_async->set_src(1024); | |||
memcpy_async->set_dst(5120); | |||
memcpy_async->set_dst_max(512); | |||
memcpy_async->set_count(1); | |||
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE); | |||
memcpy_async->set_op_index(op_desc->GetId()); | |||
} | |||
{ | |||
OpDescPtr op_desc = CreateOpDesc("output", NETOUTPUT); | |||
op_desc->AddInputDesc(tensor); | |||
op_desc->SetInputOffset({5120}); | |||
op_desc->SetSrcName( { "memcpy" } ); | |||
op_desc->SetSrcIndex( { 0 } ); | |||
NodePtr node = graph->AddNode(op_desc); // op_index = 3 | |||
} | |||
EXPECT_EQ(model.Assign(ge_model), SUCCESS); | |||
EXPECT_EQ(model.Init(), SUCCESS); | |||
EXPECT_EQ(model.input_addrs_list_.size(), 1); | |||
EXPECT_EQ(model.output_addrs_list_.size(), 1); | |||
EXPECT_EQ(model.task_list_.size(), 2); | |||
OutputData output_data; | |||
vector<Tensor> outputs; | |||
EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS); | |||
GraphExecutor graph_executer; | |||
graph_executer.init_flag_ = true; | |||
GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph); | |||
std::vector<GeTensor> input_tensor; | |||
std::vector<GeTensor> output_tensor; | |||
std::vector<InputOutputDescInfo> output_desc; | |||
InputOutputDescInfo desc0; | |||
output_desc.push_back(desc0); | |||
graph_executer.ExecuteGraphWithStream(0, nullptr, ge_root_model, input_tensor, output_tensor); | |||
} | |||
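For reference, the expectations above follow directly from the graph wiring: the Data and NetOutput nodes contribute the single input and output address, while only the Square kernel and the MemcpyAsync op emit TaskDefs, hence the task_list_ size of 2. A minimal sketch of the offset layout the test encodes (values copied from the op descs and task defs above; the constant names are illustrative only):

#include <cstdint>

// Offset layout used by the test above; all values come from the test itself.
constexpr int64_t kTensorSize = 512;     // TensorUtils::SetSize(tensor, 512)
constexpr int64_t kDataOffset = 1024;    // data/square offsets and the memcpy src
constexpr int64_t kOutputOffset = 5120;  // memcpy dst and the netoutput input offset
static_assert(kOutputOffset >= kDataOffset + kTensorSize, "output region must not overlap the input region");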
TEST_F(UtestGeExecutor, get_op_attr) { | |||
shared_ptr<DavinciModel> model = MakeShared<DavinciModel>(1, g_label_call_back); | |||
model->SetId(1); | |||
model->om_name_ = "testom"; | |||
model->name_ = "test"; | |||
shared_ptr<hybrid::HybridDavinciModel> hybrid_model = MakeShared<hybrid::HybridDavinciModel>(); | |||
model->SetId(2); | |||
model->om_name_ = "testom_hybrid"; | |||
model->name_ = "test_hybrid"; | |||
std::shared_ptr<ModelManager> model_manager = ModelManager::GetInstance(); | |||
model_manager->InsertModel(1, model); | |||
model_manager->InsertModel(2, hybrid_model); | |||
OpDescPtr op_desc = CreateOpDesc("test", "test"); | |||
std::vector<std::string> value{"test"}; | |||
ge::AttrUtils::SetListStr(op_desc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, value); | |||
model->SaveSpecifyAttrValues(op_desc); | |||
GeExecutor ge_executor; | |||
GeExecutor::isInit_ = true; | |||
std::string attr_value; | |||
auto ret = ge_executor.GetOpAttr(1, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, SUCCESS); | |||
EXPECT_EQ(attr_value, "[4]test"); | |||
ret = ge_executor.GetOpAttr(2, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, PARAM_INVALID); | |||
ret = ge_executor.GetOpAttr(3, "test", ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, attr_value); | |||
EXPECT_EQ(ret, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID); | |||
} | |||
} |
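A note on the "[4]test" expectation above: it suggests that GetOpAttr returns list-string attributes with each element prefixed by its length in square brackets, so {"test"} serializes to "[4]test". A minimal sketch of that encoding, assuming this reading of the expectation (an illustration, not the GE implementation):

#include <string>
#include <vector>

// Encodes each element as "[len]value", matching the "[4]test" check above.
static std::string EncodeListStr(const std::vector<std::string> &values) {
  std::string out;
  for (const auto &v : values) {
    out += "[" + std::to_string(v.size()) + "]" + v;  // {"test"} -> "[4]test"
  }
  return out;
}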
@@ -52,10 +52,6 @@ int32_t MsprofReport(uint32_t moduleId, uint32_t type, void *data, uint32_t len) | |||
TEST_F(UtestDavinciModel, init_success) { | |||
DavinciModel model(0, nullptr); | |||
VarManager::Instance(0)->Init(0, 0, 0, 0); | |||
map<string, string> options; | |||
options[GRAPH_MEMORY_MAX_SIZE] = "1048576"; | |||
VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
ComputeGraphPtr graph = make_shared<ComputeGraph>("default"); | |||
ProfilingManager::Instance().is_load_profiling_ = true; | |||
@@ -790,10 +786,6 @@ TEST_F(UtestDavinciModel, init_data_aipp_input_dims_normal) { | |||
// test label_set_task Init | |||
TEST_F(UtestDavinciModel, label_task_success) { | |||
VarManager::Instance(0)->Init(0, 0, 0, 0); | |||
map<string, string> options; | |||
options[GRAPH_MEMORY_MAX_SIZE] = "1048576"; | |||
VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
DavinciModel model(0, nullptr); | |||
ComputeGraphPtr graph = make_shared<ComputeGraph>("default"); | |||
@@ -961,11 +953,6 @@ TEST_F(UtestDavinciModel, simple_test_gmock) { | |||
} | |||
TEST_F(UtestDavinciModel, NnExecute) { | |||
VarManager::Instance(0)->Init(0, 0, 0, 0); | |||
map<string, string> options; | |||
options[GRAPH_MEMORY_MAX_SIZE] = "1048576"; | |||
VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
DavinciModel model(0, nullptr); | |||
ComputeGraphPtr graph = make_shared<ComputeGraph>("default"); | |||
ProfilingManager::Instance().is_load_profiling_ = true; | |||
@@ -19,35 +19,31 @@ | |||
#define protected public | |||
#define private public | |||
#include "graph/manager/graph_var_manager.h" | |||
#include "graph/manager/graph_mem_manager.h" | |||
#include "graph/ge_context.h" | |||
#undef protected | |||
#undef private | |||
namespace ge { | |||
class UtestGraphVarManagerTest : public testing::Test { | |||
protected: | |||
void SetUp() {} | |||
void TearDown() {} | |||
void SetUp() { | |||
VarManagerPool::Instance().Destory(); | |||
} | |||
void TearDown() { | |||
VarManagerPool::Instance().Destory(); | |||
} | |||
}; | |||
TEST_F(UtestGraphVarManagerTest, test_get_total_memory_size) { | |||
size_t total_mem_size = 0; | |||
Status ret = VarManager::Instance(0)->GetTotalMemorySize(total_mem_size); | |||
EXPECT_EQ(total_mem_size, 1024UL * 1024UL * 1024UL); | |||
EXPECT_EQ(ret, SUCCESS); | |||
} | |||
TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_no_related_option) { | |||
const map<string, string> options{}; | |||
Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
EXPECT_EQ(VarManager::Instance(0)->SetMemoryMallocSize(options, 1024UL * 1024UL * 1024UL), SUCCESS); | |||
EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (26.0f / 32.0f))); | |||
EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (5.0f / 32.0f))); | |||
EXPECT_EQ(ret, SUCCESS); | |||
EXPECT_EQ(VarManager::Instance(0)->Init(0, 0, 0, 0), SUCCESS); | |||
} | |||
TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_graph_mem_max_size) { | |||
const map<string, string> options{{"ge.graphMemoryMaxSize", "536870912"}}; | |||
Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options, 1024UL * 1024UL * 1024UL); | |||
EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2)); | |||
EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (5.0f / 32.0f))); | |||
EXPECT_EQ(ret, SUCCESS); | |||
@@ -55,9 +51,38 @@ TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_g | |||
TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_var_mem_max_size) { | |||
const map<string, string> options{{"ge.variableMemoryMaxSize", "536870912"}}; | |||
Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); | |||
Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options, 1024UL * 1024UL * 1024UL); | |||
EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (26.0f / 32.0f))); | |||
EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2)); | |||
EXPECT_EQ(ret, SUCCESS); | |||
} | |||
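Taken together, the tests above pin down the arithmetic behind the new two-argument SetMemoryMallocSize: with no related option, a 1 GiB total is split 26/32 for graph memory and 5/32 for variable memory, and ge.graphMemoryMaxSize or ge.variableMemoryMaxSize overrides the corresponding share. A hypothetical helper reproducing just the default split (names and structure are mine, not the VarManager code):

#include <cmath>
#include <cstddef>

struct MemSplit {
  size_t graph_mem_max;
  size_t var_mem_max;
};

// Default split checked by the tests above: 26/32 graph memory, 5/32 variable memory.
static MemSplit DefaultSplit(size_t total_mem_size) {
  return {static_cast<size_t>(std::floor(total_mem_size * (26.0f / 32.0f))),
          static_cast<size_t>(std::floor(total_mem_size * (5.0f / 32.0f)))};
}
// DefaultSplit(1024UL * 1024UL * 1024UL) matches the EXPECT_EQ values above.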
TEST_F(UtestGraphVarManagerTest, test_mem_manager_not_set) { | |||
EXPECT_EQ(VarManager::Instance(0)->Init(0, 0, 0, 0), SUCCESS); | |||
EXPECT_EQ(VarManager::Instance(0)->MallocVarMemory(1024), FAILED); | |||
EXPECT_EQ(VarManager::Instance(0)->GetVarMemoryBase(RT_MEMORY_RDMA_HBM), nullptr); | |||
EXPECT_EQ(VarManager::Instance(0)->GetVarMemoryAddr(nullptr, RT_MEMORY_RDMA_HBM), nullptr); | |||
GeTensorDesc tensor_desc; | |||
EXPECT_EQ(VarManager::Instance(0)->AssignVarMem("global_step", tensor_desc, RT_MEMORY_RDMA_HBM), INTERNAL_ERROR); | |||
} | |||
TEST_F(UtestGraphVarManagerTest, test_with_mem_manager) { | |||
const std::vector<rtMemType_t> memory_types({RT_MEMORY_HBM, RT_MEMORY_P2P_DDR}); | |||
EXPECT_EQ(MemManager::Instance().Initialize(memory_types), SUCCESS); | |||
VarManager::Instance(0)->SetMemManager(&MemManager::Instance()); | |||
EXPECT_EQ(VarManager::Instance(0)->Init(0, 0, 0, 0), SUCCESS); | |||
EXPECT_EQ(VarManager::Instance(0)->MallocVarMemory(1024), SUCCESS); | |||
EXPECT_EQ(VarManager::Instance(0)->GetVarMemoryBase(RT_MEMORY_RDMA_HBM), nullptr); | |||
uint8_t logic_addr = 0; | |||
EXPECT_EQ(VarManager::Instance(0)->GetVarMemoryAddr(&logic_addr, RT_MEMORY_RDMA_HBM), &logic_addr); | |||
EXPECT_NE(VarManager::Instance(0)->GetVarMemoryAddr(&logic_addr, RT_MEMORY_HBM), nullptr); | |||
// RdmaPoolAllocator block_bin_ not found. | |||
GeTensorDesc tensor_desc; | |||
EXPECT_EQ(VarManager::Instance(0)->AssignVarMem("global_step", tensor_desc, RT_MEMORY_RDMA_HBM), INTERNAL_ERROR); | |||
} | |||
} // namespace ge |
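The last two tests encode the call order VarManager now expects: without SetMemManager, Init succeeds but MallocVarMemory fails; after MemManager::Instance().Initialize plus SetMemManager, allocation succeeds. A minimal usage sketch distilled from those tests (error handling elided):

// Setup order implied by test_mem_manager_not_set / test_with_mem_manager.
const std::vector<rtMemType_t> mem_types({RT_MEMORY_HBM});
if (MemManager::Instance().Initialize(mem_types) == SUCCESS) {
  VarManager::Instance(0)->SetMemManager(&MemManager::Instance());
  (void)VarManager::Instance(0)->Init(0, 0, 0, 0);
  (void)VarManager::Instance(0)->MallocVarMemory(1024);  // FAILED if SetMemManager is skipped
}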
@@ -15,22 +15,11 @@ | |||
*/ | |||
#include <gtest/gtest.h> | |||
#include <memory> | |||
#include "graph/anchor.h" | |||
#include "graph/attr_value.h" | |||
#include "graph/debug/ge_attr_define.h" | |||
#include "graph/utils/graph_utils.h" | |||
#include "graph/utils/node_utils.h" | |||
#include "graph/utils/op_desc_utils.h" | |||
#include "graph/utils/tensor_utils.h" | |||
#include "omg/omg_inner_types.h" | |||
#define protected public | |||
#define private public | |||
#include "graph/manager/graph_mem_manager.h" | |||
#undef protected | |||
#undef private | |||
using namespace std; | |||
using namespace testing; | |||
@@ -83,7 +72,7 @@ TEST_F(UtestSessionScopeMemAllocator, free_success_session) { | |||
EXPECT_NE(nullptr, ptr); | |||
ptr = MemManager::Instance().SessionScopeMemInstance(RT_MEMORY_HBM).Malloc(100, 0); | |||
EXPECT_NE(nullptr, ptr); | |||
for (auto memory_type : MemManager::Instance().GetAllMemoryType()) { | |||
for (auto memory_type : MemManager::Instance().memory_type_) { | |||
if (RT_MEMORY_P2P_DDR == memory_type) { | |||
EXPECT_NE(MemManager::Instance().SessionScopeMemInstance(memory_type).Free(0), SUCCESS); | |||
} else { | |||
@@ -164,7 +164,7 @@ TEST_F(UtestHcclNodeExecutor, gatheralltoallv_execute) { | |||
std::function<void()> done = []() {}; | |||
ASSERT_EQ(task->ExecuteAsync(*node_state->GetTaskContext(), done), SUCCESS); | |||
if (handle = nullptr) { | |||
if (handle != nullptr) { | |||
dlclose(handle); | |||
} | |||
} | |||
@@ -221,7 +221,7 @@ TEST_F(UtestHcclNodeExecutor, alltoallv_execute) { | |||
std::function<void()> done = []() {}; | |||
ASSERT_EQ(task->ExecuteAsync(*node_state->GetTaskContext(), done), SUCCESS); | |||
if (handle = nullptr) { | |||
if (handle != nullptr) { | |||
dlclose(handle); | |||
} | |||
} | |||
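The guard rewritten in both hccl tests above fixes an assignment used where a comparison was intended: handle = nullptr always evaluates to false and also clobbers the handle, so dlclose was never reached and the dlopen handle leaked. The corrected pattern in isolation (the library name below is purely illustrative, not the one the tests load):

#include <dlfcn.h>

void *handle = dlopen("libexample.so", RTLD_NOW);  // illustrative name only
// ... resolve symbols and run the test body ...
if (handle != nullptr) {  // comparison, not assignment
  dlclose(handle);
}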
@@ -57,6 +57,7 @@ class SuccessNodeExecutor : public NodeExecutor { | |||
Status Finalize() override { | |||
finalized = true; | |||
return SUCCESS; | |||
} | |||
bool initialized = false; | |||