From 712b87c8c1f704ad0c392d647fa89e1fe71d7b9a Mon Sep 17 00:00:00 2001
From: Megvii Engine Team
Date: Mon, 17 Feb 2020 13:22:06 +0800
Subject: [PATCH] feat(mgb/core): add comp node for cambricon

add testcase for cambricon comp node

GitOrigin-RevId: 7794faa47ffbbd67521fcac2838d46a38c4bfe12
---
 CMakeLists.txt                                   | 1 +
 dnn/src/CMakeLists.txt                           | 1 -
 dnn/src/common/megcore/common/device_context.cpp | 1 -
 dnn/test/CMakeLists.txt                          | 1 +
 python_module/test/run.sh                        | 1 +
 python_module/test/unit/module/.gitattributes    | 1 +
 src/CMakeLists.txt                               | 1 +
 src/core/impl/comp_node_env.cpp                  | 1 -
 src/core/include/megbrain/comp_node/alloc.h      | 1 -
 src/core/include/megbrain/comp_node_env.h        | 2 +-
 src/core/include/megbrain_build_config.h.in      | 4 ++++
 src/core/test/comp_node.cpp                      | 5 ++++-
 src/core/test/mem_alloc.cpp                      | 4 +++-
 test/src/helper.cpp                              | 6 ++++--
 test/src/include/megbrain/test/helper.h          | 4 ++++
 15 files changed, 25 insertions(+), 9 deletions(-)
 create mode 100644 python_module/test/unit/module/.gitattributes

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9c2075de..1d407edb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -261,6 +261,7 @@ if(MGE_WITH_CUDA)
     set(MGE_CUDA_LIBS "${MGE_CUDA_LIBS}")
 endif()
 
+
 find_program(CCACHE_BIN ccache)
 if(CCACHE_BIN)
     set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_BIN})
diff --git a/dnn/src/CMakeLists.txt b/dnn/src/CMakeLists.txt
index 2defb17e..f0cbbe79 100644
--- a/dnn/src/CMakeLists.txt
+++ b/dnn/src/CMakeLists.txt
@@ -56,4 +56,3 @@ target_link_libraries(megdnn ${MGE_BLAS_LIBS})
 if(CMAKE_THREAD_LIBS_INIT)
     target_link_libraries(megdnn Threads::Threads)
 endif()
-
diff --git a/dnn/src/common/megcore/common/device_context.cpp b/dnn/src/common/megcore/common/device_context.cpp
index f66da9a7..ee4114c8 100644
--- a/dnn/src/common/megcore/common/device_context.cpp
+++ b/dnn/src/common/megcore/common/device_context.cpp
@@ -16,7 +16,6 @@
 #include "src/cuda/megcore/cuda_device_context.hpp"
 #endif
 
-
 using namespace megcore;
 using namespace megdnn;
 
diff --git a/dnn/test/CMakeLists.txt b/dnn/test/CMakeLists.txt
index ddbba508..c1f40a41 100644
--- a/dnn/test/CMakeLists.txt
+++ b/dnn/test/CMakeLists.txt
@@ -26,6 +26,7 @@ if(MGE_WITH_CUDA)
 endif()
 
 
+
 add_executable(megdnn_test ${SOURCES})
 target_link_libraries(megdnn_test gtest)
 target_link_libraries(megdnn_test megdnn)
diff --git a/python_module/test/run.sh b/python_module/test/run.sh
index 4d821340..c1306600 100755
--- a/python_module/test/run.sh
+++ b/python_module/test/run.sh
@@ -9,5 +9,6 @@ pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
         --ignore test/unit/data \
         --ignore test/integration/manual \
         --ignore megengine/module/pytorch \
+        --ignore test/unit/module/test_external.py \
         megengine test
 popd >/dev/null
diff --git a/python_module/test/unit/module/.gitattributes b/python_module/test/unit/module/.gitattributes
new file mode 100644
index 00000000..816f72d6
--- /dev/null
+++ b/python_module/test/unit/module/.gitattributes
@@ -0,0 +1 @@
+*.mlu binary
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 7b7f05b7..221b071a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -31,6 +31,7 @@ if(MGE_WITH_CUDA AND MGE_WITH_TRT)
     list(APPEND SOURCES ${SOURCES_})
 endif()
 
+
 set(MGB_DEF ${MGB_DEF} PARENT_SCOPE)
 add_library(megbrain STATIC EXCLUDE_FROM_ALL ${SOURCES})
 target_link_libraries(megbrain mgb_opr_param_defs)
diff --git a/src/core/impl/comp_node_env.cpp b/src/core/impl/comp_node_env.cpp
index 993a83c2..9c8bfb89 100644
--- a/src/core/impl/comp_node_env.cpp
+++ b/src/core/impl/comp_node_env.cpp
@@ -22,7 +22,6 @@
 #endif
 #endif
 
-
 using namespace mgb;
 
 /* =================== MegDNNHandle =================== */
diff --git a/src/core/include/megbrain/comp_node/alloc.h b/src/core/include/megbrain/comp_node/alloc.h
index 0c3c66e5..9d7e51f0 100644
--- a/src/core/include/megbrain/comp_node/alloc.h
+++ b/src/core/include/megbrain/comp_node/alloc.h
@@ -200,7 +200,6 @@ class DevMemAlloc: virtual public MemAllocBase {
 #endif
 
 
-
     virtual ~DevMemAlloc() = default;
 
     /*!
diff --git a/src/core/include/megbrain/comp_node_env.h b/src/core/include/megbrain/comp_node_env.h
index 1c0b37cb..fd0e846c 100644
--- a/src/core/include/megbrain/comp_node_env.h
+++ b/src/core/include/megbrain/comp_node_env.h
@@ -41,7 +41,7 @@
     } \
 } while (0)
 
-#endif //MGB_ENABLE_LOGGING
+#endif // MGB_ENABLE_LOGGING
 
 #endif
 
diff --git a/src/core/include/megbrain_build_config.h.in b/src/core/include/megbrain_build_config.h.in
index aa5612b9..54484842 100644
--- a/src/core/include/megbrain_build_config.h.in
+++ b/src/core/include/megbrain_build_config.h.in
@@ -97,6 +97,10 @@
 #endif
 
 
+#ifndef MGB_CAMBRICON
+#define MGB_CAMBRICON 0
+#endif
+
 // whether to enable TensorRT support
 #ifndef MGB_ENABLE_TENSOR_RT
 #define MGB_ENABLE_TENSOR_RT MGB_CUDA
diff --git a/src/core/test/comp_node.cpp b/src/core/test/comp_node.cpp
index b4559a62..465ca6c8 100644
--- a/src/core/test/comp_node.cpp
+++ b/src/core/test/comp_node.cpp
@@ -49,7 +49,8 @@ TEST(TestCompNode, Parse) {
     ASSERT_EQ(L::parse("cpu2:23"), make_lc(D::CPU, 2, 23));
     ASSERT_EQ(L::parse("cpu21:23"), make_lc(D::CPU, 21, 23));
 
-    ASSERT_EQ(L::parse("xpu"), make_lc(D::UNSPEC, -1, 0));
+
+    ASSERT_EQ(L::parse("xpu"), make_lc(D::UNSPEC, -1, 0));
     ASSERT_EQ(L::parse("xpux"), make_lc(D::UNSPEC, -1, 0));
     ASSERT_EQ(L::parse("xpu23"), make_lc(D::UNSPEC, 23, 0));
     ASSERT_EQ(L::parse("xpu23:1"), make_lc(D::UNSPEC, 23, 1));
@@ -70,6 +71,7 @@ TEST(TestCompNode, Parse) {
     ASSERT_THROW(L::parse("cpu2:23x"), MegBrainError);
     ASSERT_THROW(L::parse("heaxgon0"), MegBrainError);
     ASSERT_THROW(L::parse("rcom0"), MegBrainError);
+    ASSERT_THROW(L::parse("cmabricon0"), MegBrainError);
 }
 
 TEST(TestCompNode, SetDefaultDev) {
@@ -546,6 +548,7 @@ TEST(TestCompNode, MultipleLoad) {
     }
 }
 
+
 namespace {
 class CompNodeDepedentObjectInst final : public CompNodeDepedentObject {
     int *m_dst, *m_timer;
diff --git a/src/core/test/mem_alloc.cpp b/src/core/test/mem_alloc.cpp
index ba8ad671..83b07ae3 100644
--- a/src/core/test/mem_alloc.cpp
+++ b/src/core/test/mem_alloc.cpp
@@ -464,6 +464,7 @@ public:
     }
     void raw_dev_free(void* ptr) override { MGB_CUDA_CHECK(cudaFree(ptr)); }
 };
+#endif
 
 using Callback = std::function<void()>;
 void test_free_mem(CompNode cn0, CompNode cn1, DevicePolicy* policy,
@@ -529,7 +530,7 @@ void test_gather_other(CompNode cn0, CompNode cn1) {
         opr::Sleep::sleep(cn1, 0.7);
         func->execute();
     }
-#endif
+
 } // namespace
 
 #if MGB_CUDA
@@ -562,4 +563,5 @@ TEST(TestCudaMemAlloc, FreeMem) {
 }
 #endif // MGB_CUDA
 
+
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
diff --git a/test/src/helper.cpp b/test/src/helper.cpp
index d88429cd..d8acb574 100644
--- a/test/src/helper.cpp
+++ b/test/src/helper.cpp
@@ -70,8 +70,8 @@ dtype, RandomDistribution::GAUSSIAN>::operator ()(
     auto ptr = ret->ptr<ctype>();
     auto mean = m_mean, std = m_std;
     for (size_t i = 0, it = shape.total_nr_elems(); i < it; i += 2) {
-        ctype u1 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
-              u2 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
+        ctype u1 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
+              u2 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
               r = ctype(std * std::sqrt(-2 * std::log(u1))),
               theta = ctype(2 * M_PI * u2),
               z0 = ctype(r * std::cos(theta) + mean),
@@ -104,6 +104,8 @@ namespace mgb {
     template class HostTensorGenerator<
             dtype::Float32, RandomDistribution::GAUSSIAN>;
     template class HostTensorGenerator<
            dtype::Float32, RandomDistribution::UNIFORM>;
+    template class HostTensorGenerator<
+            dtype::Float16, RandomDistribution::GAUSSIAN>;
     template class HostTensorGenerator<
             dtype::Int8, RandomDistribution::UNIFORM>;
     template class HostTensorGenerator<
diff --git a/test/src/include/megbrain/test/helper.h b/test/src/include/megbrain/test/helper.h
index d8b5fcfb..eaea3e2e 100644
--- a/test/src/include/megbrain/test/helper.h
+++ b/test/src/include/megbrain/test/helper.h
@@ -400,6 +400,9 @@ bool check_gpu_available(size_t num);
 //! check whether given number of AMD GPUs is available
 bool check_amd_gpu_available(size_t num);
 
+//! check whether given number of cambricon devices is available
+bool check_cambricon_device_available(size_t num);
+
 //! check current capability >= major.minor
 bool check_compute_capability(int major, int minor);
 
@@ -436,6 +439,7 @@ public:
         return; \
     } while(0)
 
+
 #if MGB_HAVE_THREAD
 #define REQUIRE_THREAD()
 #else
-- 
GitLab
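
Note (not part of the patch): the sketch below only illustrates how the newly declared `check_cambricon_device_available()` helper from `test/src/include/megbrain/test/helper.h` might guard a Cambricon-specific test, following the same pattern the suite already uses with `check_gpu_available()` for CUDA-only tests. The test name and the `"cambricon0"` locator string are assumptions; the patch itself only shows that the misspelled `"cmabricon0"` must fail to parse.

```cpp
#include <gtest/gtest.h>

#include "megbrain/comp_node.h"
#include "megbrain/test/helper.h"

using namespace mgb;

// Hypothetical test, not in the patch: skip silently when no Cambricon
// device is present, mirroring how CUDA-only tests bail out early when
// check_gpu_available() reports no usable GPU.
TEST(TestCompNodeCambricon, Load) {
    if (!check_cambricon_device_available(1)) {
        return;  // no device available; nothing to verify on this machine
    }
    // "cambricon0" is an assumed device locator for the first Cambricon card.
    auto cn = CompNode::load("cambricon0");
    ASSERT_TRUE(cn.valid());
}
```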