Add UT

tags/v1.2.0
zhangxiaokun 4 years ago
parent
commit
408496dd18
6 changed files with 1904 additions and 2 deletions
  1. tests/ut/ge/CMakeLists.txt (+7, -2)
  2. tests/ut/ge/graph/load/hccl_task_info_unittest.cc (+145, -0)
  3. tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc (+142, -0)
  4. tests/ut/ge/graph/load/kernel_task_info_unittest.cc (+1203, -0)
  5. tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc (+138, -0)
  6. tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc (+269, -0)

+7 -2  tests/ut/ge/CMakeLists.txt

@@ -329,7 +329,7 @@ set(COMMON_FORMAT_SRC_FILES
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc"
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc"
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc"
"${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc"
"${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc"
)

set(GRAPH_OPTIMIZE_COMMON_SRC_FILES
@@ -565,6 +565,11 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES
"graph/load/new_model_manager_event_manager_unittest.cc"
#"graph/load/output_net_output_unittest.cc"
"graph/load/tbe_handle_store_unittest.cc"
"graph/load/hccl_task_info_unittest.cc"
"graph/load/kernel_ex_task_info_unittest.cc"
"graph/load/kernel_task_info_unittest.cc"
"graph/load/memcpy_addr_async_task_info_unittest.cc"
"graph/load/memcpy_async_task_info_unittest.cc"
#"graph/graph_load_unittest.cc"
"graph/ge_executor_unittest.cc"
)
@@ -914,7 +919,7 @@ target_compile_definitions(ut_libge_distinct_load_utest PRIVATE
google=ascend_private
)

target_link_libraries(ut_libge_distinct_load_utest
target_link_libraries(ut_libge_distinct_load_utest
${COMMON_SHARED_LIBRARIES}
$<BUILD_INTERFACE:intf_pub>
ge_execute_common ge_ut_common_format ge_load_common


+145 -0  tests/ut/ge/graph/load/hccl_task_info_unittest.cc

@@ -0,0 +1,145 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public
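// The defines above expose private and protected members of the classes under test,
// so these white-box unit tests can set model internals directly.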

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/kernel_task_info.h"
#include "graph/load/new_model_manager/task_info/hccl_task_info.h"

using domi::EventExDef;
using domi::KernelContext;
using domi::KernelDef;
using domi::LogTimeStampDef;
using domi::ModelTaskDef;
using domi::StreamActiveDef;
using domi::TaskDef;

namespace ge {
class UtestHcclTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};


// test success GetTaskID
TEST_F(UtestHcclTaskInfo, success_get_task_id) {
domi::ModelTaskDef model_task_def;
domi::TaskDef *task = model_task_def.add_task();
task->set_type(RT_MODEL_TASK_KERNEL);
TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task->type()));

EXPECT_EQ(task_info->GetTaskID(), 0);

KernelTaskInfo kernel_task_info;
EXPECT_EQ(kernel_task_info.GetTaskID(), 0);

HcclTaskInfo hccl_task_info;
EXPECT_EQ(hccl_task_info.GetTaskID(), 0);
}

// test CreateStream of TaskInfo
TEST_F(UtestHcclTaskInfo, success_create_stream) {
DavinciModel model(0, nullptr);
KernelTaskInfo kernel_task_info;
EXPECT_EQ(kernel_task_info.CreateStream(3, &model, 0), SUCCESS);
}

// test hccl_Distribute
TEST_F(UtestHcclTaskInfo, success_distribute7) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
task7->set_type(RT_MODEL_TASK_HCCL);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
Status ret = task_info7->Init(task7[0], &model);
EXPECT_EQ(FAILED, ret);

std::vector<TaskInfoPtr> task_list;
task_list.push_back(task_info7);
model.task_list_ = task_list;

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

// test hccl Init with hccl_type
TEST_F(UtestHcclTaskInfo, success_distribute7_with_hccl_type) {
DavinciModel model(0, nullptr);
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_ = { stream };

domi::TaskDef task_def;
HcclTaskInfo hccl_task_info;
EXPECT_EQ(hccl_task_info.Init(task_def, nullptr), PARAM_INVALID);


domi::KernelHcclDef *kernel_hccl_def = task_def.mutable_kernel_hccl();
kernel_hccl_def->set_op_index(0);
kernel_hccl_def->set_hccl_type("HcomBroadcast");
model.op_list_[0] = std::make_shared<OpDesc>("FrameworkOp", "FrameworkOp");
EXPECT_EQ(hccl_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_kernel_hccl();
}

// test hccl_GetPrivateDefByTaskDef
TEST_F(UtestHcclTaskInfo, success_hccl_get_private_def_by_task_def) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
TaskDef *task7 = model_task_def.add_task();
task7->set_type(RT_MODEL_TASK_HCCL);
// for SetStream
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
// for GetPrivateDefByTaskDef
task7->set_ops_kernel_store_ptr(10);
std::string value = "hccl_task";
task7->set_private_def(value);

TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
// for Distribute
EXPECT_EQ(task_info7->Init(task7[0], &model), PARAM_INVALID);

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

// test hccl_task_TransToGETaskInfo
TEST_F(UtestHcclTaskInfo, success_hccl_trans_to_ge_task_info) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
// for type
task7->set_type(RT_MODEL_TASK_HCCL);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));

GETaskInfo ge_task;
HcclTaskInfo hccl_task_info;
hccl_task_info.TransToGETaskInfo(ge_task);

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

} // namespace ge

+142 -0  tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc

@@ -0,0 +1,142 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"

#include "graph/load/new_model_manager/task_info/kernel_ex_task_info.h"
#include "cce/aicpu_engine_struct.h"

namespace ge {
class UtestKernelExTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

extern OpDescPtr CreateOpDesc(string name, string type);

// test kernel_ex_task_Init
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_init) {
domi::TaskDef task_def;
KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.Init(task_def, nullptr), PARAM_INVALID);

DavinciModel model(0, nullptr);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);

rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_op_index(1);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), INTERNAL_ERROR);

kernel_ex_def->clear_op_index();
kernel_ex_def->set_op_index(0);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);

kernel_ex_def->set_task_info("KernelEx");
kernel_ex_def->set_task_info_size(1);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);


constexpr uint32_t arg_size = sizeof(STR_FWK_OP_KERNEL);
string value1(arg_size, 'a');
kernel_ex_def->set_args_size(arg_size);
kernel_ex_def->set_args(value1);
OpDescPtr v_op_desc = CreateOpDesc("ge_global_step", "Variable");
model.variable_op_list_.push_back(v_op_desc);
model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({150}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);


task_def.clear_kernel_ex();
}

// test kernel_ex_task_Release
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_release) {
KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

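// Release should tolerate any combination of kernel_buf_ and input_output_addr_
// being allocated or null; every case below is expected to return SUCCESS.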
kernel_ex_task_info.kernel_buf_ = nullptr;
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

kernel_ex_task_info.input_output_addr_ = nullptr;
rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
}

// test kernel_ex_task_info_copy
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_info_copy) {
DavinciModel model(0, nullptr);
model.runtime_param_.mem_base = (uint8_t *)0x12345;
model.runtime_param_.mem_size = 100332000;

rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);

domi::TaskDef task_def;
KernelExTaskInfo kernel_ex_task_info;

domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_task_info_size(150);
kernel_ex_def->set_op_index(0);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");

EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace empty.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({0}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is null.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({10}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is small.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({150}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_kernel_ex();
model.runtime_param_.mem_base = nullptr;
}

TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_op_index(0);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");

AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");

KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.CalculateArgs(task_def, &model), SUCCESS);
}

} // namespace ge

+1203 -0  tests/ut/ge/graph/load/kernel_task_info_unittest.cc (file diff suppressed because it is too large)


+138 -0  tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc

@@ -0,0 +1,138 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.h"

namespace ge {
class UtestMemcpyAddrAsyncTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

extern OpDescPtr CreateOpDesc(string name, string type);

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_addr_async_task_init) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

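// Logical address layout assumed by this test: feature map [0x8003000, 0x8008000),
// weights [0x8008000, 0x800e000), variables [0x800e000, 0x800f000).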
model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;

// DavinciModel is null
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, nullptr), INTERNAL_ERROR);

// SetStream failed.
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), FAILED);

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

// GetRuntimeAddress src failed.
model.op_list_[6] = CreateOpDesc("memcpyaddrasync", MEMCPYADDRASYNC);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);

// GetRuntimeAddress dst failed.
memcpy_async->set_src(0x08003000);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);

memcpy_async->set_dst(0x08008000);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_task_init_failed) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

task_def.set_stream_id(0);
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;


GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYADDRASYNC);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});

// Init succeeds once the stream, op desc and offsets are in place
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(0x08003000);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(0x08008000);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(0);

// CalculateArgs with a valid model
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
}

} // namespace ge

+269 -0  tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc

@@ -0,0 +1,269 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/memcpy_async_task_info.h"


namespace ge {
class UtestMemcpyAsyncTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

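// Minimal OpDesc factory shared by the task_info unit tests in this target
// (referenced via extern declarations from the other *_task_info_unittest.cc files).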
OpDescPtr CreateOpDesc(string name = "", string type = "") {
auto op_desc = std::make_shared<OpDesc>(name, type);
op_desc->SetStreamId(0);
op_desc->SetId(0);

AttrUtils::SetFloat(op_desc, ATTR_NAME_ALPHA, 0);
AttrUtils::SetFloat(op_desc, ATTR_NAME_BETA, 0);

op_desc->SetWorkspace({});
op_desc->SetWorkspaceBytes({});
op_desc->SetInputOffset({});
op_desc->SetOutputOffset({});

AttrUtils::SetListStr(op_desc, ATTR_NAME_WEIGHT_NAME, {});
AttrUtils::SetInt(op_desc, POOLING_ATTR_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_PAD_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_DATA_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_CEIL_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_NAN_OPT, 0);
AttrUtils::SetListInt(op_desc, POOLING_ATTR_WINDOW, {});
AttrUtils::SetListInt(op_desc, POOLING_ATTR_PAD, {});
AttrUtils::SetListInt(op_desc, POOLING_ATTR_STRIDE, {});
AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1});
AttrUtils::SetInt(op_desc, ATTR_NAME_STREAM_SWITCH_COND, 0);
return op_desc;
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;

MemcpyAsyncTaskInfo memcpy_async_task_info;

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
memcpy_async->set_src(0x08008000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);

// set OpDesc attr
std::vector<int64_t> memory_type = { RT_MEMORY_TS_4G };
AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type);
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);

memcpy_async->set_dst_max(0);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);


task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init_failed) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;


// DavinciModel is null
MemcpyAsyncTaskInfo memcpy_async_task_info;
EXPECT_EQ(memcpy_async_task_info.Init(task_def, nullptr), PARAM_INVALID);

// SetStream failed
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);

// GetOpByIndex failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
memcpy_async->set_src(0x08008000);

EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
memcpy_async->set_dst(0x08003000);

// set OpDesc attr
std::vector<int64_t> memory_type = { RT_MEMORY_TS_4G };
AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);
memcpy_async->set_dst_max(512);


GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

memcpy_async->set_dst(0x08009000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init_known_node) {
DavinciModel model(0, nullptr);
model.SetKnownNode(true);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;

MemcpyAsyncTaskInfo memcpy_async_task_info;

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
memcpy_async->set_src(0x08008000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);

// set OpDesc attr
AttrUtils::SetStr(model.op_list_[6], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

memcpy_async->set_dst_max(0);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);


task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_distribute) {
DavinciModel model(0, nullptr);
model.ge_model_ = MakeShared<GeModel>();

auto model_task_def = MakeShared<domi::ModelTaskDef>();
domi::TaskDef *task_def = model_task_def->add_task();
task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
domi::KernelDef *kernel_def = task_def->mutable_kernel();
domi::KernelContext *ctx = kernel_def->mutable_context();
ctx->set_op_index(0);
model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task_def->type()));

model.task_list_ = { task_info };
model.ge_model_->SetModelTaskDef(model_task_def);

EXPECT_EQ(model.DistributeTask(), SUCCESS);
EXPECT_EQ(task_info->Distribute(), SUCCESS);
task_info->Release();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(0x08003000);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(0x08008000);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(0);

model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");

// CalculateArgs with the fixed-addr attribute set
MemcpyAsyncTaskInfo memcpy_async_task_info;
EXPECT_EQ(memcpy_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
}

TEST_F(UtestMemcpyAsyncTaskInfo, memcpy_async_update_args) {
DavinciModel model(0, nullptr);

MemcpyAsyncTaskInfo memcpy_async_task_info;
memcpy_async_task_info.davinci_model_ = &model;

EXPECT_EQ(memcpy_async_task_info.UpdateArgs(), SUCCESS);
}

} // namespace ge
