diff --git a/CMakeLists.txt b/CMakeLists.txt
index 83a48da4d270ad03515e13d9c78450c162719c0b..363bbe0735fe095a333ef10f2a03de232bc25658 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -295,7 +295,6 @@ if(MGE_WITH_TEST)
 endif()
 
 if(MGE_BUILD_IMPERATIVE_RT)
-    add_compile_definitions(MGB_ENABLE_IMPERATIVE_RUNTIME)
     set(CMAKE_CXX_STANDARD 17)
 endif()
 
@@ -711,7 +710,6 @@ endif()
 
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MARCH}")
 
-set(MGB_ENABLE_IMPERATIVE ${MGE_BUILD_IMPERATIVE_RT})
 # Write out megbrain_build_config.h
 # It defines macros needed by both megbrain and dnn
 configure_file(src/megbrain_build_config.h.in ${CMAKE_CURRENT_BINARY_DIR}/genfiles/megbrain_build_config.h)
diff --git a/src/core/impl/graph/cg_impl.h b/src/core/impl/graph/cg_impl.h
index 4f7457dd04b68437849a5ee4c369d51e2c434be9..8c24785f04942fd6451c1491abb3e3468f15a408 100644
--- a/src/core/impl/graph/cg_impl.h
+++ b/src/core/impl/graph/cg_impl.h
@@ -125,9 +125,7 @@ public:
     template <typename T>
     static ComputingGraphImpl* downcast(T* ptr) = delete;
     inline static ComputingGraphImpl* downcast(ComputingGraph* graph) {
-        #ifdef MGB_ENABLE_IMPERATIVE_RUNTIME
         mgb_assert(!graph->options().imperative_proxy_graph);
-        #endif
         return static_cast<ComputingGraphImpl*>(graph);
     }
 
diff --git a/src/megbrain_build_config.h.in b/src/megbrain_build_config.h.in
index 30daf3e310b3acaf677fd71b5d5c02e1d050483c..6caf1c08f80679527b9ee2ef545029ddec81efe1 100644
--- a/src/megbrain_build_config.h.in
+++ b/src/megbrain_build_config.h.in
@@ -34,8 +34,6 @@
 #cmakedefine01 MGB_ENABLE_FBS_SERIALIZATION
 #cmakedefine01 MGB_IS_DEV
 
-#cmakedefine01 MGB_ENABLE_IMPERATIVE
-
 // DNN related flags
 // Platform macro's
 #cmakedefine01 MEGDNN_WITH_CUDA
diff --git a/src/opr/impl/dnn/batch_norm.cpp b/src/opr/impl/dnn/batch_norm.cpp
index 6461573bb38f0043c0774d711eb27d9086f08ba1..52a9c774ab79e802b3f611c99a95048db3eea5cc 100644
--- a/src/opr/impl/dnn/batch_norm.cpp
+++ b/src/opr/impl/dnn/batch_norm.cpp
@@ -140,7 +140,6 @@ void BatchNormForward::scn_do_execute() {
     auto &&y = output(4)->dev_tensor();
     mgb_assert(x.layout().is_contiguous() &&
                y.layout().is_contiguous());
-#if MGB_ENABLE_IMPERATIVE
     if (input().size() == 5) { // need running mean/variance
         auto &&o0 = output(0)->dev_tensor(),
              &&o1 = output(1)->dev_tensor(),
@@ -163,7 +162,6 @@ void BatchNormForward::scn_do_execute() {
                    && o1.raw_ptr() == i1.raw_ptr());
         }
     }
-#endif
     auto scale = input(1)->dev_tensor().as_megdnn();
     auto bias = input(2)->dev_tensor().as_megdnn();
     auto mean = output(0)->dev_tensor().as_megdnn();