
feat(mgb/core): add comp node for cambricon

add testcase for cambricon comp node

GitOrigin-RevId: 7794faa47f
tags/v0.4.0
Megvii Engine Team (Xinran Xu), 5 years ago
commit 712b87c8c1
15 changed files with 25 additions and 9 deletions
  1. CMakeLists.txt (+1, -0)
  2. dnn/src/CMakeLists.txt (+0, -1)
  3. dnn/src/common/megcore/common/device_context.cpp (+0, -1)
  4. dnn/test/CMakeLists.txt (+1, -0)
  5. python_module/test/run.sh (+1, -0)
  6. python_module/test/unit/module/.gitattributes (+1, -0)
  7. src/CMakeLists.txt (+1, -0)
  8. src/core/impl/comp_node_env.cpp (+0, -1)
  9. src/core/include/megbrain/comp_node/alloc.h (+0, -1)
  10. src/core/include/megbrain/comp_node_env.h (+1, -1)
  11. src/core/include/megbrain_build_config.h.in (+4, -0)
  12. src/core/test/comp_node.cpp (+4, -1)
  13. src/core/test/mem_alloc.cpp (+3, -1)
  14. test/src/helper.cpp (+4, -2)
  15. test/src/include/megbrain/test/helper.h (+4, -0)

CMakeLists.txt (+1, -0)

@@ -261,6 +261,7 @@ if(MGE_WITH_CUDA)
set(MGE_CUDA_LIBS "${MGE_CUDA_LIBS}")
endif()

find_program(CCACHE_BIN ccache)
if(CCACHE_BIN)
set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_BIN})

dnn/src/CMakeLists.txt (+0, -1)

@@ -56,4 +56,3 @@ target_link_libraries(megdnn ${MGE_BLAS_LIBS})
if(CMAKE_THREAD_LIBS_INIT)
target_link_libraries(megdnn Threads::Threads)
endif()

dnn/src/common/megcore/common/device_context.cpp (+0, -1)

@@ -16,7 +16,6 @@
#include "src/cuda/megcore/cuda_device_context.hpp"
#endif

using namespace megcore;
using namespace megdnn;

dnn/test/CMakeLists.txt (+1, -0)

@@ -26,6 +26,7 @@ if(MGE_WITH_CUDA)
endif()

add_executable(megdnn_test ${SOURCES})
target_link_libraries(megdnn_test gtest)
target_link_libraries(megdnn_test megdnn)

python_module/test/run.sh (+1, -0)

@@ -9,5 +9,6 @@ pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
    --ignore test/unit/data \
    --ignore test/integration/manual \
    --ignore megengine/module/pytorch \
    --ignore test/unit/module/test_external.py \
    megengine test
popd >/dev/null

python_module/test/unit/module/.gitattributes (+1, -0)

@@ -0,0 +1 @@
*.mlu binary

src/CMakeLists.txt (+1, -0)

@@ -31,6 +31,7 @@ if(MGE_WITH_CUDA AND MGE_WITH_TRT)
list(APPEND SOURCES ${SOURCES_})
endif()

set(MGB_DEF ${MGB_DEF} PARENT_SCOPE)
add_library(megbrain STATIC EXCLUDE_FROM_ALL ${SOURCES})
target_link_libraries(megbrain mgb_opr_param_defs)

src/core/impl/comp_node_env.cpp (+0, -1)

@@ -22,7 +22,6 @@
#endif
#endif

using namespace mgb;

/* =================== MegDNNHandle =================== */

src/core/include/megbrain/comp_node/alloc.h (+0, -1)

@@ -200,7 +200,6 @@ class DevMemAlloc: virtual public MemAllocBase {
#endif

    virtual ~DevMemAlloc() = default;

    /*!

src/core/include/megbrain/comp_node_env.h (+1, -1)

@@ -41,7 +41,7 @@
        } \
    } while (0)

#endif //MGB_ENABLE_LOGGING
#endif // MGB_ENABLE_LOGGING

#endif

src/core/include/megbrain_build_config.h.in (+4, -0)

@@ -97,6 +97,10 @@
#endif

#ifndef MGB_CAMBRICON
#define MGB_CAMBRICON 0
#endif

// whether to enable TensorRT support
#ifndef MGB_ENABLE_TENSOR_RT
#define MGB_ENABLE_TENSOR_RT MGB_CUDA

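MGB_CAMBRICON defaults to 0, so builds without the Cambricon toolchain are unchanged; the build system is expected to define it to 1 when Cambricon support is enabled. A minimal sketch of how such a flag is typically consumed (hypothetical code, not from this commit; only the MGB_CUDA and MGB_CAMBRICON macros come from the header, and the include path is assumed):

#include "megbrain_build_config.h"  // assumed include path of the generated header

// Count the device backends compiled into this build (illustrative only).
int count_compiled_backends() {
    int n = 1;       // CPU is always available
#if MGB_CUDA
    ++n;             // CUDA backend compiled in
#endif
#if MGB_CAMBRICON
    ++n;             // Cambricon backend compiled in
#endif
    return n;
}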


src/core/test/comp_node.cpp (+4, -1)

@@ -49,7 +49,8 @@ TEST(TestCompNode, Parse) {
    ASSERT_EQ(L::parse("cpu2:23"), make_lc(D::CPU, 2, 23));
    ASSERT_EQ(L::parse("cpu21:23"), make_lc(D::CPU, 21, 23));

    ASSERT_EQ(L::parse("xpu"), make_lc(D::UNSPEC, -1, 0));
    ASSERT_EQ(L::parse("xpux"), make_lc(D::UNSPEC, -1, 0));
    ASSERT_EQ(L::parse("xpu23"), make_lc(D::UNSPEC, 23, 0));
    ASSERT_EQ(L::parse("xpu23:1"), make_lc(D::UNSPEC, 23, 1));

@@ -70,6 +71,7 @@ TEST(TestCompNode, Parse) {
    ASSERT_THROW(L::parse("cpu2:23x"), MegBrainError);
    ASSERT_THROW(L::parse("heaxgon0"), MegBrainError);
    ASSERT_THROW(L::parse("rcom0"), MegBrainError);
    ASSERT_THROW(L::parse("cmabricon0"), MegBrainError);
}

TEST(TestCompNode, SetDefaultDev) {

@@ -546,6 +548,7 @@ TEST(TestCompNode, MultipleLoad) {
    }
}

namespace {
class CompNodeDepedentObjectInst final : public CompNodeDepedentObject {
    int *m_dst, *m_timer;

src/core/test/mem_alloc.cpp (+3, -1)

@@ -464,6 +464,7 @@ public:
    }
    void raw_dev_free(void* ptr) override { MGB_CUDA_CHECK(cudaFree(ptr)); }
};
#endif

using Callback = std::function<void()>;
void test_free_mem(CompNode cn0, CompNode cn1, DevicePolicy* policy,

@@ -529,7 +530,7 @@ void test_gather_other(CompNode cn0, CompNode cn1) {
    opr::Sleep::sleep(cn1, 0.7);
    func->execute();
}
#endif
} // namespace

#if MGB_CUDA

@@ -562,4 +563,5 @@ TEST(TestCudaMemAlloc, FreeMem) {
}
#endif // MGB_CUDA

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

test/src/helper.cpp (+4, -2)

@@ -70,8 +70,8 @@ dtype, RandomDistribution::GAUSSIAN>::operator ()(
    auto ptr = ret->ptr<ctype>();
    auto mean = m_mean, std = m_std;
    for (size_t i = 0, it = shape.total_nr_elems(); i < it; i += 2) {
        ctype u1 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
              u2 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
        ctype u1 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
              u2 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
              r = ctype(std * std::sqrt(-2 * std::log(u1))),
              theta = ctype(2 * M_PI * u2),
              z0 = ctype(r * std::cos(theta) + mean),

@@ -105,6 +105,8 @@ namespace mgb {
template class HostTensorGenerator<
        dtype::Float32, RandomDistribution::UNIFORM>;
template class HostTensorGenerator<
        dtype::Float16, RandomDistribution::GAUSSIAN>;
template class HostTensorGenerator<
        dtype::Int8, RandomDistribution::UNIFORM>;
template class HostTensorGenerator<
        dtype::Uint8, RandomDistribution::UNIFORM>;

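The loop shown above is a Box-Muller transform: each pair of uniform draws in (0, 1] yields two Gaussian samples. The added ctype(...) casts make the narrowing explicit, presumably so the new Float16 instantiation below compiles cleanly. As a standalone illustration of the same transform (a hypothetical helper, not code from the commit):

#include <cmath>
#include <random>
#include <utility>

// Box-Muller: map two uniform draws in (0, 1] to a pair of independent
// Gaussian samples with the given mean and standard deviation.
std::pair<float, float> gaussian_pair(std::mt19937& rng, float mean, float stddev) {
    // the +1.0 keeps u1 in (0, 1], so std::log(u1) stays finite
    double u1 = (rng() + 1.0) / (rng.max() + 1.0);
    double u2 = (rng() + 1.0) / (rng.max() + 1.0);
    double r = stddev * std::sqrt(-2.0 * std::log(u1));
    double theta = 2.0 * M_PI * u2;
    return {float(r * std::cos(theta) + mean),
            float(r * std::sin(theta) + mean)};
}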


test/src/include/megbrain/test/helper.h (+4, -0)

@@ -400,6 +400,9 @@ bool check_gpu_available(size_t num);
//! check whether given number of AMD GPUs is available
bool check_amd_gpu_available(size_t num);

//! check whether given number of cambricon devices is available
bool check_cambricon_device_available(size_t num);

//! check current capability >= major.minor
bool check_compute_capability(int major, int minor);

@@ -436,6 +439,7 @@ public:
        return; \
    } while(0)

#if MGB_HAVE_THREAD
#define REQUIRE_THREAD()
#else

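check_cambricon_device_available parallels the existing check_gpu_available and check_amd_gpu_available helpers. A hedged sketch of how a Cambricon-specific test might use it to skip gracefully on machines without the hardware (the test name and the mgb:: qualification are assumptions, not taken from the commit):

#include "megbrain/test/helper.h"
#include <gtest/gtest.h>

// Illustrative test only: bail out early when no Cambricon device is present,
// mirroring how GPU tests gate themselves on device availability.
TEST(TestCambriconCompNode, Availability) {
    if (!mgb::check_cambricon_device_available(1)) {
        return;  // no device on this machine; nothing to check
    }
    // Cambricon-specific assertions would go here.
}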

