diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9c2075deec6344db3f56effc01ff4f62460ae20a..1d407edb35425aca609802df4672ecf6104bddbd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -261,6 +261,7 @@ if(MGE_WITH_CUDA)
     set(MGE_CUDA_LIBS "${MGE_CUDA_LIBS}")
 endif()

+
 find_program(CCACHE_BIN ccache)
 if(CCACHE_BIN)
     set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_BIN})
diff --git a/dnn/src/CMakeLists.txt b/dnn/src/CMakeLists.txt
index 2defb17e39c4e9eddaed2a0c7f6c2f0f6f3c6cc6..f0cbbe7994e9f54d06e613e6535f49e128695021 100644
--- a/dnn/src/CMakeLists.txt
+++ b/dnn/src/CMakeLists.txt
@@ -56,4 +56,3 @@ target_link_libraries(megdnn ${MGE_BLAS_LIBS})
 if(CMAKE_THREAD_LIBS_INIT)
     target_link_libraries(megdnn Threads::Threads)
 endif()
-
diff --git a/dnn/src/common/megcore/common/device_context.cpp b/dnn/src/common/megcore/common/device_context.cpp
index f66da9a79affcde2858b1df81600d7e78b51e5e7..ee4114c8141b4f4aa96e0aad17660e37b4a32dcd 100644
--- a/dnn/src/common/megcore/common/device_context.cpp
+++ b/dnn/src/common/megcore/common/device_context.cpp
@@ -16,7 +16,6 @@
 #include "src/cuda/megcore/cuda_device_context.hpp"
 #endif

-
 using namespace megcore;
 using namespace megdnn;
diff --git a/dnn/test/CMakeLists.txt b/dnn/test/CMakeLists.txt
index ddbba508aeca3b6c6b804eaf906f1250d3830f19..c1f40a4183700fdef779917e46508b95195193ec 100644
--- a/dnn/test/CMakeLists.txt
+++ b/dnn/test/CMakeLists.txt
@@ -26,6 +26,7 @@ if(MGE_WITH_CUDA)
 endif()

+
 add_executable(megdnn_test ${SOURCES})
 target_link_libraries(megdnn_test gtest)
 target_link_libraries(megdnn_test megdnn)
diff --git a/python_module/test/run.sh b/python_module/test/run.sh
index 4d821340471966a931b1b3345fb33e3b07badec3..c13066003372d719aeeeb804259ee8cf62869be6 100755
--- a/python_module/test/run.sh
+++ b/python_module/test/run.sh
@@ -9,5 +9,6 @@ pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
     --ignore test/unit/data \
     --ignore test/integration/manual \
     --ignore megengine/module/pytorch \
+    --ignore test/unit/module/test_external.py \
     megengine test
 popd >/dev/null
diff --git a/python_module/test/unit/module/.gitattributes b/python_module/test/unit/module/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..816f72d606d559356d757eea2817f579b9ebbeef
--- /dev/null
+++ b/python_module/test/unit/module/.gitattributes
@@ -0,0 +1 @@
+*.mlu binary
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 7b7f05b79695d28853c97208acfbbf265514327b..221b071a33e22921f22c1ae7905d8d73c2315a3a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -31,6 +31,7 @@ if(MGE_WITH_CUDA AND MGE_WITH_TRT)
     list(APPEND SOURCES ${SOURCES_})
 endif()

+
 set(MGB_DEF ${MGB_DEF} PARENT_SCOPE)
 add_library(megbrain STATIC EXCLUDE_FROM_ALL ${SOURCES})
 target_link_libraries(megbrain mgb_opr_param_defs)
diff --git a/src/core/impl/comp_node_env.cpp b/src/core/impl/comp_node_env.cpp
index 993a83c2aca523ff06d2292e807bbff4df7889f0..9c8bfb899e46c2c266439c62e782f471608de012 100644
--- a/src/core/impl/comp_node_env.cpp
+++ b/src/core/impl/comp_node_env.cpp
@@ -22,7 +22,6 @@
 #endif
 #endif

-
 using namespace mgb;

 /* =================== MegDNNHandle =================== */
diff --git a/src/core/include/megbrain/comp_node/alloc.h b/src/core/include/megbrain/comp_node/alloc.h
index 0c3c66e5d40445872572b32a2f54a908d9745adb..9d7e51f064331b486f5b13568cbeb43e4dc9678c 100644
--- a/src/core/include/megbrain/comp_node/alloc.h
+++ b/src/core/include/megbrain/comp_node/alloc.h
@@ -200,7 +200,6 @@ class DevMemAlloc: virtual public MemAllocBase {
 #endif

-
     virtual ~DevMemAlloc() = default;

     /*!
diff --git a/src/core/include/megbrain/comp_node_env.h b/src/core/include/megbrain/comp_node_env.h
index 1c0b37cb65731f8206c287491086de19f9d58932..fd0e846c7c560a38f140ece0b4fdd1e762a6c2b1 100644
--- a/src/core/include/megbrain/comp_node_env.h
+++ b/src/core/include/megbrain/comp_node_env.h
@@ -41,7 +41,7 @@
         } \
     } while (0)

-#endif //MGB_ENABLE_LOGGING
+#endif // MGB_ENABLE_LOGGING

 #endif
diff --git a/src/core/include/megbrain_build_config.h.in b/src/core/include/megbrain_build_config.h.in
index aa5612b9b0cabcf4e7018ee99a3e9823d4eefeaf..544848423acd48ae200f1332198d7d5a57d3d633 100644
--- a/src/core/include/megbrain_build_config.h.in
+++ b/src/core/include/megbrain_build_config.h.in
@@ -97,6 +97,10 @@
 #endif

+#ifndef MGB_CAMBRICON
+#define MGB_CAMBRICON 0
+#endif
+
 // whether to enable TensorRT support
 #ifndef MGB_ENABLE_TENSOR_RT
 #define MGB_ENABLE_TENSOR_RT MGB_CUDA
diff --git a/src/core/test/comp_node.cpp b/src/core/test/comp_node.cpp
index b4559a62fc4243d0a3e3ac351f6adb7f98fce277..465ca6c8fa4ed14701cebfbac4231b67cf4d7fd3 100644
--- a/src/core/test/comp_node.cpp
+++ b/src/core/test/comp_node.cpp
@@ -49,7 +49,8 @@ TEST(TestCompNode, Parse) {
     ASSERT_EQ(L::parse("cpu2:23"), make_lc(D::CPU, 2, 23));
     ASSERT_EQ(L::parse("cpu21:23"), make_lc(D::CPU, 21, 23));
-    ASSERT_EQ(L::parse("xpu"), make_lc(D::UNSPEC, -1, 0));
+
+    ASSERT_EQ(L::parse("xpu"), make_lc(D::UNSPEC, -1, 0));
     ASSERT_EQ(L::parse("xpux"), make_lc(D::UNSPEC, -1, 0));
     ASSERT_EQ(L::parse("xpu23"), make_lc(D::UNSPEC, 23, 0));
     ASSERT_EQ(L::parse("xpu23:1"), make_lc(D::UNSPEC, 23, 1));
@@ -70,6 +71,7 @@ TEST(TestCompNode, Parse) {
     ASSERT_THROW(L::parse("cpu2:23x"), MegBrainError);
     ASSERT_THROW(L::parse("heaxgon0"), MegBrainError);
     ASSERT_THROW(L::parse("rcom0"), MegBrainError);
+    ASSERT_THROW(L::parse("cmabricon0"), MegBrainError);
 }

 TEST(TestCompNode, SetDefaultDev) {
@@ -546,6 +548,7 @@ TEST(TestCompNode, MultipleLoad) {
     }
 }

+
 namespace {
 class CompNodeDepedentObjectInst final : public CompNodeDepedentObject {
     int *m_dst, *m_timer;
diff --git a/src/core/test/mem_alloc.cpp b/src/core/test/mem_alloc.cpp
index ba8ad6714a3cec2fd9001bedb31e8192c8c92013..83b07ae39a19a0ebe0b724c25ac1c5e923cbb520 100644
--- a/src/core/test/mem_alloc.cpp
+++ b/src/core/test/mem_alloc.cpp
@@ -464,6 +464,7 @@ public:
     }
     void raw_dev_free(void* ptr) override { MGB_CUDA_CHECK(cudaFree(ptr)); }
 };
+#endif

 using Callback = std::function;
 void test_free_mem(CompNode cn0, CompNode cn1, DevicePolicy* policy,
@@ -529,7 +530,7 @@ void test_gather_other(CompNode cn0, CompNode cn1) {
     opr::Sleep::sleep(cn1, 0.7);
     func->execute();
 }
-#endif
+
 } // namespace

 #if MGB_CUDA
@@ -562,4 +563,5 @@ TEST(TestCudaMemAlloc, FreeMem) {
 }
 #endif // MGB_CUDA

+
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
diff --git a/test/src/helper.cpp b/test/src/helper.cpp
index d88429cdb4b2fd55023b802cf4ca37f04fc58953..d8acb57487967bf1d383df133d681950b676b9e3 100644
--- a/test/src/helper.cpp
+++ b/test/src/helper.cpp
@@ -70,8 +70,8 @@ dtype, RandomDistribution::GAUSSIAN>::operator ()(
     auto ptr = ret->ptr<ctype>();
     auto mean = m_mean, std = m_std;
     for (size_t i = 0, it = shape.total_nr_elems(); i < it; i += 2) {
-        ctype u1 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
-              u2 = (m_rng() + 1.0) / (m_rng.max() + 1.0),
+        ctype u1 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
+              u2 = ctype((m_rng() + 1.0) / (m_rng.max() + 1.0)),
               r = ctype(std * std::sqrt(-2 * std::log(u1))),
               theta = ctype(2 * M_PI * u2),
               z0 = ctype(r * std::cos(theta) + mean),
@@ -104,6 +104,8 @@ namespace mgb {
             dtype::Float32, RandomDistribution::GAUSSIAN>;
     template class HostTensorGenerator<
             dtype::Float32, RandomDistribution::UNIFORM>;
+    template class HostTensorGenerator<
+            dtype::Float16, RandomDistribution::GAUSSIAN>;
     template class HostTensorGenerator<
             dtype::Int8, RandomDistribution::UNIFORM>;
     template class HostTensorGenerator<
diff --git a/test/src/include/megbrain/test/helper.h b/test/src/include/megbrain/test/helper.h
index d8b5fcfb4868021d2918384f6b862a4d998a18ea..eaea3e2efb22254a19311ca18adf774c77d09289 100644
--- a/test/src/include/megbrain/test/helper.h
+++ b/test/src/include/megbrain/test/helper.h
@@ -400,6 +400,9 @@ bool check_gpu_available(size_t num);

 //! check whether given number of AMD GPUs is available
 bool check_amd_gpu_available(size_t num);

+//! check whether given number of cambricon devices is available
+bool check_cambricon_device_available(size_t num);
+
 //! check current capability >= major.minor
 bool check_compute_capability(int major, int minor);

@@ -436,6 +439,7 @@ public:
             return; \
     } while(0)

+
 #if MGB_HAVE_THREAD
 #define REQUIRE_THREAD()
 #else
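
Note on the test/src/helper.cpp hunk: the Gaussian generator draws pairs of normal variates with the Box-Muller transform, and the patch wraps the intermediate uniforms in explicit ctype(...) casts so the newly instantiated dtype::Float16 generator narrows its values deliberately rather than implicitly. The standalone sketch below only illustrates that transform under stated assumptions; generate_gaussian and the std::mt19937 engine are hypothetical stand-ins for the project's HostTensorGenerator and its RNG, not MegEngine APIs.

// Hypothetical standalone illustration of the Box-Muller step used by the
// Gaussian HostTensorGenerator; not part of the patch above.
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

template <typename ctype>
std::vector<ctype> generate_gaussian(std::size_t n, double mean, double std_dev) {
    std::mt19937 rng{42};
    std::vector<ctype> out(n + (n & 1));  // round up so values can be filled in pairs
    for (std::size_t i = 0; i < out.size(); i += 2) {
        // u1, u2 are uniform in (0, 1]; the "+ 1.0" keeps log(u1) finite,
        // mirroring the (m_rng() + 1.0) / (m_rng.max() + 1.0) expression above.
        double u1 = (rng() + 1.0) / (rng.max() + 1.0);
        double u2 = (rng() + 1.0) / (rng.max() + 1.0);
        double r = std_dev * std::sqrt(-2.0 * std::log(u1));
        double theta = 2.0 * M_PI * u2;
        // Do the arithmetic in double, then narrow once to ctype (which may be
        // a half-precision type), as the ctype(...) casts in the patch do.
        out[i] = ctype(r * std::cos(theta) + mean);
        out[i + 1] = ctype(r * std::sin(theta) + mean);
    }
    out.resize(n);  // drop the padding element if n was odd
    return out;
}

For example, generate_gaussian<float>(1000, 0.0, 1.0) yields roughly standard-normal samples; keeping the intermediate math in double and casting only the final results is what lets the Float16 instantiation avoid losing precision inside the transform itself.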