From 40bab1ed66147a09babe34b76489a06332a8458b Mon Sep 17 00:00:00 2001 From: Megvii Engine Team Date: Sun, 25 Apr 2021 17:21:54 +0800 Subject: [PATCH] feat(log): opt log, enable mgb sdk log at opt build more info: 16cd674c56 * change MGE_OVERRIDE_LOG_LEVEL to RUNTIME_OVERRIDE_LOG_LEVEL * use ::std::getenv not MGB_GETENV for special ENV GitOrigin-RevId: ee0f9c0f72e627c331c00100f6a21adc927081df --- src/core/impl/common.cpp | 33 ++++--- src/core/impl/comp_node/cpu/comp_node.cpp | 6 +- src/core/impl/comp_node_env.cpp | 15 ++-- src/core/impl/graph/cg_impl.cpp | 2 +- src/core/impl/graph/cg_impl_seq.cpp | 2 +- src/core/include/megbrain/common.h | 38 ++++---- src/gopt/impl/inference.cpp | 89 ++++++++++++------- src/gopt/impl/tensor_reformat.cpp | 50 +++++++---- src/megbrain_build_config.h.in | 2 +- src/opr/impl/basic_arith.cpp | 20 ++--- src/opr/impl/basic_arith.sereg.h | 9 +- src/opr/impl/blas.cpp | 6 +- src/opr/impl/indexing.cpp | 28 ++++-- src/opr/impl/internal/megdnn_opr_wrapper.cpp | 12 +-- src/opr/impl/rand.cpp | 8 +- src/opr/impl/search_policy/algo_chooser.cpp | 44 ++++----- src/opr/impl/search_policy/profiler.cpp | 5 ++ src/opr/impl/tensor_gen.cpp | 8 +- src/opr/include/megbrain/opr/indexing.h | 2 +- .../opr/internal/megdnn_opr_wrapper.h | 4 +- src/opr/include/megbrain/opr/rand.h | 2 +- .../megbrain/opr/search_policy/algo_chooser.h | 6 +- src/opr/include/megbrain/opr/tensor_gen.h | 4 +- src/plugin/impl/var_sanity_check.cpp | 10 +++ src/serialization/impl/extern_c_opr.cpp | 6 +- src/serialization/impl/opr_load_dump.cpp | 3 +- 26 files changed, 241 insertions(+), 173 deletions(-) diff --git a/src/core/impl/common.cpp b/src/core/impl/common.cpp index c79c39107..805f90ff6 100644 --- a/src/core/impl/common.cpp +++ b/src/core/impl/common.cpp @@ -23,6 +23,7 @@ #ifdef __ANDROID__ #include +#include #endif using namespace mgb; @@ -32,11 +33,19 @@ LogLevel config_default_log_level() { auto default_level = LogLevel::ERROR; //! env to config LogLevel //! DEBUG = 0, INFO = 1, WARN = 2, ERROR = 3, NO_LOG = 4 - //! for example , export MGE_OVERRIDE_LOG_LEVEL=0, means set LogLevel to + //! for example , export RUNTIME_OVERRIDE_LOG_LEVEL=0, means set LogLevel to //! DEBUG - if (auto env = MGB_GETENV("MGE_OVERRIDE_LOG_LEVEL")) + if (auto env = ::std::getenv("RUNTIME_OVERRIDE_LOG_LEVEL")) default_level = static_cast(std::stoi(env)); +#ifdef __ANDROID__ + //! special for Android prop, attention: getprop may need permission + char buf[PROP_VALUE_MAX]; + if (__system_property_get("RUNTIME_OVERRIDE_LOG_LEVEL", buf) > 0) { + default_level = static_cast(atoi(buf)); + } +#endif + return default_level; } @@ -155,7 +164,7 @@ void default_log_handler(LogLevel level, default: android_level = ANDROID_LOG_ERROR; } - __android_log_vprint(android_level, "megbrain", fmt, ap); + __android_log_vprint(android_level, "runtime", fmt, ap); #endif #undef HDR_FMT @@ -185,7 +194,7 @@ class MegDNNLogHandler { return; } - std::string new_fmt{"[megdnn] "}; + std::string new_fmt{"[dnn] "}; new_fmt.append(fmt); log_handler(mgb_level, file, func, line, new_fmt.c_str(), ap); } @@ -238,9 +247,17 @@ namespace { #endif // MGB_ENABLE_LOGGING LogLevel mgb::set_log_level(LogLevel level) { - if (auto env = MGB_GETENV("MGE_OVERRIDE_LOG_LEVEL")) + if (auto env = ::std::getenv("RUNTIME_OVERRIDE_LOG_LEVEL")) level = static_cast(std::stoi(env)); +#ifdef __ANDROID__ + //! 
special for Android prop, attention: getprop may need permission + char buf[PROP_VALUE_MAX]; + if (__system_property_get("RUNTIME_OVERRIDE_LOG_LEVEL", buf) > 0) { + level = static_cast(atoi(buf)); + } +#endif + auto ret = min_log_level; min_log_level = level; return ret; @@ -256,7 +273,6 @@ LogHandler mgb::set_log_handler(LogHandler handler) { return ret; } -#if MGB_ASSERT_LOC void mgb::__assert_fail__( const char *file, int line, const char *func, const char *expr, const char *msg_fmt, ...) { @@ -273,11 +289,6 @@ void mgb::__assert_fail__( } mgb_throw_raw(AssertionError{msg}); } -#else -void mgb::__assert_fail__() { - mgb_throw(AssertionError, "assertion failed"); -} -#endif #if MGB_ENABLE_LOGGING && !MGB_ENABLE_EXCEPTION void mgb::__on_exception_throw__(const std::exception &exc) { diff --git a/src/core/impl/comp_node/cpu/comp_node.cpp b/src/core/impl/comp_node/cpu/comp_node.cpp index d5d3bf4b4..e5d96cce9 100644 --- a/src/core/impl/comp_node/cpu/comp_node.cpp +++ b/src/core/impl/comp_node/cpu/comp_node.cpp @@ -759,7 +759,7 @@ public: #else mgb_throw(MegBrainError, "Atlas comp_node used but " - "MGB_ATLAS not enabled"); + "ATLAS BUILD not enabled"); #endif } else if (dest_impl->env().property().type == DeviceType::CAMBRICON) { @@ -769,7 +769,7 @@ public: #else mgb_throw(MegBrainError, "Cambricon comp_node used but " - "MGB_CAMBRICON not enabled"); + "CAMBRICON BUILD not enabled"); #endif } else { @@ -1035,7 +1035,7 @@ void CpuCompNode::CpuDispatchableBase::EventImpl::do_device_wait_by( return m_comp_node_impl->sync(); #else mgb_throw(MegBrainError, - "Atlas comp_node used but MGB_ATLAS not enabled"); + "Atlas comp_node used but ATLAS BUILD not enabled"); #endif } else if (cn_impl->env().property().type == CompNode::DeviceType::CAMBRICON) { diff --git a/src/core/impl/comp_node_env.cpp b/src/core/impl/comp_node_env.cpp index 41594ed90..1c6546415 100644 --- a/src/core/impl/comp_node_env.cpp +++ b/src/core/impl/comp_node_env.cpp @@ -51,14 +51,13 @@ MegDNNHandle& MegDNNHandle::get(const CompNodeEnv& env) { MegDNNHandle::MegDNNHandle(const CompNodeEnv& env) { auto megdnn_version = megdnn::get_version(); - mgb_throw_if( - megdnn_version.major != MEGDNN_MAJOR || - megdnn_version.minor < MEGDNN_MINOR, - SystemError, - "incompatible megdnn version: compiled with %d.%d, get %d.%d.%d " - "at runtime", - MEGDNN_MAJOR, MEGDNN_MINOR, megdnn_version.major, - megdnn_version.minor, megdnn_version.patch); + mgb_throw_if(megdnn_version.major != MEGDNN_MAJOR || + megdnn_version.minor < MEGDNN_MINOR, + SystemError, + "incompatible dnn version: compiled with %d.%d, get %d.%d.%d " + "at runtime", + MEGDNN_MAJOR, MEGDNN_MINOR, megdnn_version.major, + megdnn_version.minor, megdnn_version.patch); bool init = false; #if MGB_CUDA if (env.property().type == CompNode::DeviceType::CUDA) { diff --git a/src/core/impl/graph/cg_impl.cpp b/src/core/impl/graph/cg_impl.cpp index f49d9362a..25386ee76 100644 --- a/src/core/impl/graph/cg_impl.cpp +++ b/src/core/impl/graph/cg_impl.cpp @@ -880,7 +880,7 @@ std::string ComputingGraphImpl::get_mem_allocation_info() const { return objlist->to_string(); #endif // MGB_ENABLE_JSON - mgb_log_warn("mgb is not configured with MGB_ENABLE_JSON on," + mgb_log_warn("target is not configured with JSON BUILD on," "get_mem_allocation_info returns null string"); return std::string(); } diff --git a/src/core/impl/graph/cg_impl_seq.cpp b/src/core/impl/graph/cg_impl_seq.cpp index 1ccead74b..2a2587e77 100644 --- a/src/core/impl/graph/cg_impl_seq.cpp +++ b/src/core/impl/graph/cg_impl_seq.cpp @@ -619,7 
+619,7 @@ void ComputingGraphImpl::MegDNNDtorCheck::enable() { mgb_assert(!m_enabled); m_enabled = true; auto cb_dnn = [](megdnn::OperatorBase* opr) { - mgb_log_error("unexpected destruction of megdnn opr %p", opr); + mgb_log_error("unexpected destruction of dnn opr %p", opr); mgb_trap(); }; auto cb_mem = [](size_t alloc_size, bool, void* ptr) { diff --git a/src/core/include/megbrain/common.h b/src/core/include/megbrain/common.h index 04a59d527..e999515e1 100644 --- a/src/core/include/megbrain/common.h +++ b/src/core/include/megbrain/common.h @@ -108,34 +108,33 @@ void __on_exception_throw__(const std::exception &exc) } while(0) // assert +void __assert_fail__(const char* file, int line, const char* func, + const char* expr, const char* msg_fmt = 0, ...) + __attribute__((format(printf, 5, 6), noreturn)); #if MGB_ASSERT_LOC /*! * \brief extended assert * extra diagnostics message (in printf format) could be printed when assertion * fails; the asserted expression is guaranteed to be evaluated */ -#define mgb_assert(expr, msg...) \ - do { \ - if (mgb_unlikely(!(expr))) \ - ::mgb::__assert_fail__(__FILE__, __LINE__, \ - __PRETTY_FUNCTION__, # expr, ##msg); \ - } while(0) -void __assert_fail__( - const char *file, int line, const char *func, - const char *expr, const char *msg_fmt = 0, ...) - __attribute__((format(printf, 5, 6), noreturn)); +#define mgb_assert(expr, msg...) \ + do { \ + if (mgb_unlikely(!(expr))) \ + ::mgb::__assert_fail__(__FILE__, __LINE__, __PRETTY_FUNCTION__, \ + #expr, ##msg); \ + } while (0) #else -#define mgb_assert(expr, msg...) \ - do { \ - if (mgb_unlikely(!(expr))) \ - ::mgb::__assert_fail__(); \ - } while(0) -void __assert_fail__() __attribute__((noreturn)); -#endif // MGB_ASSERT_LOC +#define mgb_assert(expr, msg...) \ + do { \ + if (mgb_unlikely(!(expr))) \ + ::mgb::__assert_fail__( \ + "about location info, please build with debug", __LINE__, \ + NULL, #expr, ##msg); \ + } while (0) +#endif // MGB_ASSERT_LOC /* ================ logging ================ */ -//! caused by need remve some words at opt release -#if MGB_ENABLE_LOGGING +#if MGB_ASSERT_LOC #define mgb_log_debug(fmt...) \ _mgb_do_log(::mgb::LogLevel::DEBUG, __FILE__, __func__, __LINE__, fmt) #define mgb_log(fmt...) \ @@ -154,7 +153,6 @@ void __assert_fail__() __attribute__((noreturn)); _mgb_do_log(::mgb::LogLevel::WARN, "", "", __LINE__, fmt) #define mgb_log_error(fmt...) 
\ _mgb_do_log(::mgb::LogLevel::ERROR, LOC, "", __LINE__, fmt) -#undef LOC #endif enum class LogLevel { DEBUG, INFO, WARN, ERROR, NO_LOG }; diff --git a/src/gopt/impl/inference.cpp b/src/gopt/impl/inference.cpp index 1defadb94..f9d14bc0b 100644 --- a/src/gopt/impl/inference.cpp +++ b/src/gopt/impl/inference.cpp @@ -1045,7 +1045,8 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { INTER_WEIGHT_DENSEI_DOT; return megdnn::param::RelayoutFormat::Mode::INTER_WEIGHT_DENSEI; } else { - mgb_assert(conv_mode == megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_mode != megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); if (filter->shape()[1] == 1 && filter->shape()[2] == 1) { return megdnn::param::RelayoutFormat::Mode::INTER_WEIGHT_CHANI; } else { @@ -1081,9 +1082,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& conv_opr = opr->cast_final_safe(); - mgb_assert(conv_opr.param().format == - megdnn::param::Convolution::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + conv_opr.param().format != + megdnn::param::Convolution::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode *conv_src = nullptr, *conv_weights = nullptr; if (new_inp[0]->shape().ndim == 4) { // new input src is NCHW @@ -1094,8 +1097,9 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { icpg = new_inp[1]->shape()[1]; ocpg = new_inp[1]->shape()[0]; } else { - mgb_assert(conv_opr.param().sparse == - megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_opr.param().sparse != + megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "ERROR mode"); group = new_inp[1]->shape()[0]; icpg = new_inp[1]->shape()[2]; ocpg = new_inp[1]->shape()[1]; @@ -1117,8 +1121,9 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { megdnn::param::Convolution::Sparse::DENSE) { ocpg = new_inp[1]->shape()[0]; } else { - mgb_assert(conv_opr.param().sparse == - megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_opr.param().sparse != + megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "ERROR mode"); size_t icpg = new_inp[1]->shape()[2]; ocpg = new_inp[1]->shape()[1]; if (icpg == 1 && ocpg == 1) { @@ -1176,9 +1181,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& conv_bias_opr = opr->cast_final_safe(); - mgb_assert(conv_bias_opr.param().format == - megdnn::param::ConvBias::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + conv_bias_opr.param().format != + megdnn::param::ConvBias::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode *conv_bias_src = nullptr, *conv_bias_weights = nullptr, *conv_bias_bias = nullptr; if (new_inp[0]->shape().ndim == 4) { @@ -1190,8 +1197,9 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { icpg = new_inp[1]->shape()[1]; ocpg = new_inp[1]->shape()[0]; } else { - mgb_assert(conv_bias_opr.param().sparse == - megdnn::param::ConvBias::Sparse::GROUP); + mgb_throw_if(conv_bias_opr.param().sparse != + megdnn::param::ConvBias::Sparse::GROUP, + MegBrainError, "mode error"); group = new_inp[1]->shape()[0]; icpg = new_inp[1]->shape()[2]; ocpg = new_inp[1]->shape()[1]; @@ -1213,8 +1221,9 @@ std::unique_ptr 
ConvertFormatPass::make_nhwcd4_converter() { megdnn::param::ConvBias::Sparse::DENSE) { ocpg = new_inp[1]->shape()[0]; } else { - mgb_assert(conv_bias_opr.param().sparse == - megdnn::param::ConvBias::Sparse::GROUP); + mgb_throw_if(conv_bias_opr.param().sparse != + megdnn::param::ConvBias::Sparse::GROUP, + MegBrainError, "ERROR mode"); size_t icpg = new_inp[1]->shape()[2]; ocpg = new_inp[1]->shape()[1]; if (icpg == 1 && ocpg == 1) { @@ -1293,9 +1302,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& deconv_opr = opr->cast_final_safe(); - mgb_assert(deconv_opr.param().format == - megdnn::param::Convolution::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + deconv_opr.param().format != + megdnn::param::Convolution::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode *deconv_src = nullptr, *deconv_weights = nullptr; if (new_inp[1]->shape().ndim == 4) { // new input src is NCHW @@ -1306,8 +1317,9 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { icpg = new_inp[0]->shape()[0]; ocpg = new_inp[0]->shape()[1]; } else { - mgb_assert(deconv_opr.param().sparse == - megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(deconv_opr.param().sparse != + megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); group = new_inp[0]->shape()[0]; icpg = new_inp[0]->shape()[1]; ocpg = new_inp[0]->shape()[2]; @@ -1329,8 +1341,9 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { megdnn::param::Convolution::Sparse::DENSE) { ocpg = new_inp[0]->shape()[1]; } else { - mgb_assert(deconv_opr.param().sparse == - megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(deconv_opr.param().sparse != + megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); ocpg = new_inp[0]->shape()[2]; } @@ -1393,9 +1406,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { return opr_shallow_copy; } auto& resize_opr = opr->cast_final_safe(); - mgb_assert(resize_opr.param().format == - megdnn::param::Resize::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + resize_opr.param().format != + megdnn::param::Resize::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode* inp = nullptr; if (new_inp[0]->shape().ndim == 4) { auto param = megdnn::param::RelayoutFormat(); @@ -1425,9 +1440,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { return opr_shallow_copy; } auto& warp_opr = opr->cast_final_safe(); - mgb_assert(warp_opr.param().format == - megdnn::param::WarpPerspective::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + warp_opr.param().format != + megdnn::param::WarpPerspective::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode* inp = nullptr; if (new_inp[0]->shape().ndim == 4) { // new input src is NCHW @@ -1466,9 +1483,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { return opr_shallow_copy; } auto& warp_opr = opr->cast_final_safe(); - mgb_assert(warp_opr.param().format == - megdnn::param::WarpAffine::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + warp_opr.param().format != + megdnn::param::WarpAffine::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to 
NHWCD4"); VarNode* inp = nullptr; if (new_inp[0]->shape().ndim == 4) { // new input src is NCHW @@ -1499,9 +1518,11 @@ std::unique_ptr ConvertFormatPass::make_nhwcd4_converter() { return opr_shallow_copy; } auto& pooling_opr = opr->cast_final_safe(); - mgb_assert(pooling_opr.param().format == - megdnn::param::Pooling::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NHWCD4"); + mgb_throw_if( + pooling_opr.param().format != + megdnn::param::Pooling::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NHWCD4"); VarNode* inp = nullptr; if (new_inp[0]->shape().ndim == 4) { // new input src is NCHW diff --git a/src/gopt/impl/tensor_reformat.cpp b/src/gopt/impl/tensor_reformat.cpp index 893c63ee8..b3fe04e32 100644 --- a/src/gopt/impl/tensor_reformat.cpp +++ b/src/gopt/impl/tensor_reformat.cpp @@ -1465,7 +1465,8 @@ std::unique_ptr EnableNCHW4Pass::make_nchw4_converter() { return {weight_to_nchw4_mode_dense, src_to_nchw4_mode}; } } else { - mgb_assert(conv_mode == megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_mode != megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); mgb_assert(filter->shape().ndim == 5, "The origin filter if not NCHW mode"); size_t IC = filter->shape()[2]; @@ -2018,7 +2019,8 @@ void EnableNchwxxPass::fill_opr_convert_fun(size_t pack_c_size) { ret.second = hybrid_nchw_nchwxx; } } else { - mgb_assert(conv_mode == megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_mode != megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); size_t group = filter->shape()[0]; size_t ocpg = filter->shape()[1]; size_t icpg = filter->shape()[2]; @@ -2038,9 +2040,11 @@ void EnableNchwxxPass::fill_opr_convert_fun(size_t pack_c_size) { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& conv_opr = opr->cast_final_safe(); - mgb_assert(conv_opr.param().format == - megdnn::param::Convolution::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NCHWXX"); + mgb_throw_if( + conv_opr.param().format != + megdnn::param::Convolution::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NCHWXX"); bool valid_nchw_nchw44 = nchw_nchwxx_valid(conv_opr, new_inp, pack_c_size); auto is_trans = test_trans_nchwxx( @@ -2118,9 +2122,11 @@ void EnableNchwxxPass::fill_opr_convert_fun(size_t pack_c_size) { mgb_assert(opr->input().size() <= 3, "nchwxx does not support conv_bias fuse Z right now"); auto& conv_bias_opr = opr->cast_final_safe(); - mgb_assert(conv_bias_opr.param().format == - megdnn::param::ConvBias::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NCHWXX"); + mgb_throw_if( + conv_bias_opr.param().format != + megdnn::param::ConvBias::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NCHWXX"); bool valid_nchw_nchw44 = nchw_nchwxx_valid(conv_bias_opr, new_inp, pack_c_size, conv_bias_opr.param().nonlineMode); @@ -2244,9 +2250,11 @@ void EnableNchwxxPass::fill_opr_convert_fun(size_t pack_c_size) { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& pooling_opr = opr->cast_final_safe(); - mgb_assert(pooling_opr.param().format == - megdnn::param::Pooling::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NCHWxx"); + mgb_throw_if( + pooling_opr.param().format != + megdnn::param::Pooling::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NCHWxx"); VarNode* inp = new_inp[0]; //! 
if input is nchwxx if (inp->shape().ndim == 5) { @@ -2433,7 +2441,8 @@ EnableNchw44DotPass::make_nchw44_dot_converter() { } } } else { - mgb_assert(conv_mode == megdnn::param::Convolution::Sparse::GROUP); + mgb_throw_if(conv_mode != megdnn::param::Convolution::Sparse::GROUP, + MegBrainError, "mode error"); size_t group = filter->shape()[0]; size_t ocpg = filter->shape()[1]; size_t icpg = filter->shape()[2]; @@ -2462,10 +2471,11 @@ EnableNchw44DotPass::make_nchw44_dot_converter() { const VarNodeArray& new_inp) { mgb_assert(opr->input().size() == new_inp.size()); auto& conv_opr = opr->cast_final_safe(); - mgb_assert(conv_opr.param().format == - megdnn::param::Convolution::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to " - "NCHW44_DOT"); + mgb_throw_if(conv_opr.param().format != + megdnn::param::Convolution::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to " + "NCHW44_DOT"); bool valid_nchw_nchw44 = nchw_nchwxx_valid( conv_opr, new_inp, pack_c_size, megdnn::param::ConvBias::NonlineMode::IDENTITY, true); @@ -2543,9 +2553,11 @@ EnableNchw44DotPass::make_nchw44_dot_converter() { mgb_assert(opr->input().size() <= 3, "nchwxx-dot does not support conv_bias fuse Z right now"); auto& conv_bias_opr = opr->cast_final_safe(); - mgb_assert(conv_bias_opr.param().format == - megdnn::param::ConvBias::Format::NCHW, - "ConvertFormat Pass only support converting NCHW to NCHWXX"); + mgb_throw_if( + conv_bias_opr.param().format != + megdnn::param::ConvBias::Format::NCHW, + MegBrainError, + "ConvertFormat Pass only support converting NCHW to NCHWXX"); bool valid_nchw_nchw44 = nchw_nchwxx_valid(conv_bias_opr, new_inp, pack_c_size, conv_bias_opr.param().nonlineMode, true); diff --git a/src/megbrain_build_config.h.in b/src/megbrain_build_config.h.in index 1deb8256f..4407817b1 100644 --- a/src/megbrain_build_config.h.in +++ b/src/megbrain_build_config.h.in @@ -127,7 +127,7 @@ // whether to enbale configuing megbrain internals through env vars #ifndef MGB_ENABLE_GETENV -#define MGB_ENABLE_GETENV 1 +#define MGB_ENABLE_GETENV MGB_ASSERT_LOC #endif // whether to remove unnecessary features when used for serving diff --git a/src/opr/impl/basic_arith.cpp b/src/opr/impl/basic_arith.cpp index bf6f2ab7d..aa6b0c584 100644 --- a/src/opr/impl/basic_arith.cpp +++ b/src/opr/impl/basic_arith.cpp @@ -343,24 +343,24 @@ void Elemwise::mem_plan_fwd_in2out_writable() { } void Elemwise::scn_do_execute() { - auto &&inp = input(); - megdnn::TensorNDArray megdnn_inp; - mgb_assert(megdnn_inp.capacity() >= inp.size(), - "heap allocation in elemwise exec"); - megdnn_inp.resize(inp.size()); - for (size_t i = 0; i < inp.size(); ++ i) { + auto&& inp = input(); + megdnn::TensorNDArray dnn_inp; + mgb_assert(dnn_inp.capacity() >= inp.size(), + "heap allocation in elemwise exec"); + dnn_inp.resize(inp.size()); + for (size_t i = 0; i < inp.size(); ++i) { if (inp[i]->dev_tensor().empty()) { mgb_assert(output(0)->dev_tensor().empty()); return; } - megdnn_inp[i] = (inp[i]->dev_tensor().as_megdnn()); + dnn_inp[i] = (inp[i]->dev_tensor().as_megdnn()); } mgb_assert(!output(0)->dev_tensor().empty()); megdnn_opr()->param() = param(); - call_megdnn_opr_exec( - comp_node(), megdnn_inp, output(0)->dev_tensor().as_megdnn(), - megdnn_opr(), this); + call_megdnn_opr_exec(comp_node(), dnn_inp, + output(0)->dev_tensor().as_megdnn(), megdnn_opr(), + this); } void Elemwise::init_output_static_infer_desc() { diff --git a/src/opr/impl/basic_arith.sereg.h b/src/opr/impl/basic_arith.sereg.h index 84c086aa5..d8180c3ae 
100644 --- a/src/opr/impl/basic_arith.sereg.h +++ b/src/opr/impl/basic_arith.sereg.h @@ -126,10 +126,11 @@ namespace serialization { MGB_MARK_USED_VAR(graph); SymbolVar target_shape; if (inputs.size() == 1) { - mgb_assert(param.axis >= - -megdnn::param::OptionalAxisV1::MAX_NDIM && - param.axis < - megdnn::param::OptionalAxisV1::MAX_NDIM); + mgb_throw_if( + param.axis < -megdnn::param::OptionalAxisV1::MAX_NDIM || + param.axis >= + megdnn::param::OptionalAxisV1::MAX_NDIM, + MegBrainError, "DIM error"); } else { mgb_assert(inputs.size() == 2); target_shape = inputs[1]; diff --git a/src/opr/impl/blas.cpp b/src/opr/impl/blas.cpp index 32ddf4eb4..336f330fb 100644 --- a/src/opr/impl/blas.cpp +++ b/src/opr/impl/blas.cpp @@ -470,9 +470,9 @@ MGB_DYN_TYPE_OBJ_FINAL_IMPL(SVD); SVD::SVD(VarNode* src, const Param& param, const OperatorNodeConfig& config) : Super(OperatorNodeBaseCtorParam{src->owner_graph(), config, "svd", {src}}) { - mgb_assert(src->dtype() == megdnn::dtype::Float32(), - "Singular Value Decomposition on non-float32 tensors is " - "not supoorted."); + mgb_throw_if(src->dtype() != megdnn::dtype::Float32(), MegDNNError, + "Singular Value Decomposition on non-float32 tensors is not " + "supoorted."); init_megdnn_opr(*this, param); add_input({src}); diff --git a/src/opr/impl/indexing.cpp b/src/opr/impl/indexing.cpp index b6e4e41f1..11426900d 100644 --- a/src/opr/impl/indexing.cpp +++ b/src/opr/impl/indexing.cpp @@ -187,12 +187,12 @@ template Opr& mixin::IndexingMultiAxisVecMegDNNOprHolder::megdnn_opr( cg::SingleCNOperatorNodeBase& self) { auto comp_node = self.comp_node(); - if (!m_megdnn_opr || m_megdnn_opr.comp_node() != comp_node) { - m_megdnn_opr = intl::create_megdnn_opr(comp_node); - m_megdnn_opr->set_error_tracker( + if (!m_dnn_opr || m_dnn_opr.comp_node() != comp_node) { + m_dnn_opr = intl::create_megdnn_opr(comp_node); + m_dnn_opr->set_error_tracker( static_cast(&self)); } - return *m_megdnn_opr; + return *m_dnn_opr; } template @@ -228,7 +228,7 @@ template void mixin::IndexingMultiAxisVecMegDNNOprHolder::record_megdnn_opr( mgb::cg::GraphExecutable::ExecDependencyArray& deps) { deps.emplace_back( - std::make_unique(std::move(m_megdnn_opr))); + std::make_unique(std::move(m_dnn_opr))); } /* ==================== MultiAxisVecFancyIndexingHelper ==================== */ @@ -258,14 +258,24 @@ intl::MultiAxisVecFancyIndexingHelper::make_megdnn_index_desc( } } if (all_scalar) { - mgb_log_warn("%s{%s}: no vector indexer; consider using Subtensor " +#if MGB_ENABLE_GETENV + mgb_log_warn( + "%s{%s}: no vector indexer; consider using Subtensor " "family for better performance; you can set " "MGB_THROW_ON_SCALAR_IDX to throw an exception to help " "tracking the related operator", cname(), dyn_typeinfo()->name); - mgb_throw_if(MGB_GETENV("MGB_THROW_ON_SCALAR_IDX"), - MegBrainError, "vector-indexing operator used with all " - "scalar indices"); +#else + mgb_log_warn( + "%s{%s}: no vector indexer; consider using Subtensor " + "family for better performance", + cname(), dyn_typeinfo()->name); +#endif +#if MGB_ENABLE_GETENV + mgb_throw_if(MGB_GETENV("MGB_THROW_ON_SCALAR_IDX"), MegBrainError, + "vector-indexing operator used with all " + "scalar indices"); +#endif } // always set m_scalar_idx_warn_printed to be true, so we do not print diff --git a/src/opr/impl/internal/megdnn_opr_wrapper.cpp b/src/opr/impl/internal/megdnn_opr_wrapper.cpp index 8c6acb88c..a920dcbe7 100644 --- a/src/opr/impl/internal/megdnn_opr_wrapper.cpp +++ b/src/opr/impl/internal/megdnn_opr_wrapper.cpp @@ -377,21 +377,21 @@ 
MegDNNOprHolder::~MegDNNOprHolder() noexcept = default; void MegDNNOprHolder::mixin_init_output_comp_node(OperatorNodeBase &self) { SingleCNOperatorNode::mixin_init_output_comp_node(self); create_megdnn_opr(); - mgb_assert(m_megdnn_opr); - m_megdnn_opr->set_error_tracker(&self); + mgb_assert(m_dnn_opr); + m_dnn_opr->set_error_tracker(&self); } void MegDNNOprHolder::mixin_on_output_comp_node_stream_changed( OperatorNodeBase &self) { SingleCNOperatorNode::mixin_on_output_comp_node_stream_changed(self); create_megdnn_opr(); - mgb_assert(m_megdnn_opr); - m_megdnn_opr->set_error_tracker(&self); + mgb_assert(m_dnn_opr); + m_dnn_opr->set_error_tracker(&self); } void MegDNNOprHolder::set_megdnn_opr( std::unique_ptr self) { - m_megdnn_opr = std::move(self); + m_dnn_opr = std::move(self); } void MegDNNOprHolder::record_megdnn_opr( @@ -402,7 +402,7 @@ void MegDNNOprHolder::record_megdnn_opr( void MegDNNOprHolder::record_megdnn_opr( cg::GraphExecutable::ExecDependencyArray& deps) { - record_megdnn_opr(std::move(m_megdnn_opr), deps); + record_megdnn_opr(std::move(m_dnn_opr), deps); } /* ================== MegDNNOprHolderBwdStaticInfer ================== */ diff --git a/src/opr/impl/rand.cpp b/src/opr/impl/rand.cpp index 7bc0c7248..1045a235e 100644 --- a/src/opr/impl/rand.cpp +++ b/src/opr/impl/rand.cpp @@ -59,10 +59,10 @@ cg::OperatorNodeBase::NodeProp* RNGOprBase::do_make_node_prop() const { } void RNGOprBase::ensure_megdnn_opr() { - if (!m_megdnn_opr || m_megdnn_opr.comp_node() != comp_node()) { + if (!m_dnn_opr || m_dnn_opr.comp_node() != comp_node()) { // activate comp_node for curandCreateGenerator in create_megdnn_opr comp_node().activate(); - m_megdnn_opr = create_megdnn_opr(); + m_dnn_opr = create_megdnn_opr(); } } @@ -76,7 +76,7 @@ void RNGOprBase::init_output_static_infer_desc() { auto infer_wk = [this](TensorShape &dest, const InpVal &inp) { ensure_megdnn_opr(); dest.ndim = 1; - dest.shape[0] = m_megdnn_opr->get_workspace_in_bytes( + dest.shape[0] = m_dnn_opr->get_workspace_in_bytes( {inp.val.at(0).shape(), output(0)->dtype()}); return true; }; @@ -87,7 +87,7 @@ void RNGOprBase::init_output_static_infer_desc() { } void RNGOprBase::scn_do_execute() { - m_megdnn_opr->exec( + m_dnn_opr->exec( output(0)->dev_tensor().as_megdnn(), get_megdnn_workspace_from_var(output(1))); } diff --git a/src/opr/impl/search_policy/algo_chooser.cpp b/src/opr/impl/search_policy/algo_chooser.cpp index 9df0320f2..4ac6a7f7b 100644 --- a/src/opr/impl/search_policy/algo_chooser.cpp +++ b/src/opr/impl/search_policy/algo_chooser.cpp @@ -332,7 +332,7 @@ AlgoChooser::AlgoChooserHelper::AlgoChooserHelper( const megdnn::param::ExecutionPolicy& execution_policy, bool allow_weight_preprocess) : m_layouts{layouts}, - m_megdnn_opr{megdnn_opr}, + m_dnn_opr{megdnn_opr}, m_param{param_str}, m_base_mgb_opr{mgb_opr}, m_cn{cn}, @@ -356,15 +356,15 @@ AlgoChooser::AlgoChooserHelper::choose_by_heuristic( owner_graph(), m_cn, m_execution_policy.workspace_limit); auto attr = extract_algo_attribute(selected_strategy); policy.algo = - APPLY(m_megdnn_opr->get_algorithm_info_heuristic( + APPLY(m_dnn_opr->get_algorithm_info_heuristic( args..., workspace_limit, attr.first, attr.second), m_layouts) .desc; - Algorithm* algo = m_megdnn_opr->get_algorithm_from_desc(policy.algo); + Algorithm* algo = m_dnn_opr->get_algorithm_from_desc(policy.algo); mgb_assert(algo, "Unknown algo description"); std::vector&& sub_items = algo->get_subopr_list( - to_layout_array(m_layouts), m_megdnn_opr); + to_layout_array(m_layouts), m_dnn_opr); 
FOREACH_OPR_TYPE_DISPATCH(sub_items, { auto&& megdnn_opr = intl::create_megdnn_opr<_Opr>(m_cn); @@ -389,7 +389,7 @@ AlgoChooser::AlgoChooserHelper::choose_by_profile( const ExecutionStrategy& selected_strategy, bool enable_update) const { MIDOUT_B(Opr, midout_iv(MGB_HASH_STR("choose_by_profile"))) if (owner_graph()->options().no_profiling_on_shape_change) { - auto policy = m_megdnn_opr->execution_policy(); + auto policy = m_dnn_opr->execution_policy(); if (policy.algo.valid()) { return policy; } @@ -439,9 +439,9 @@ typename AlgoChooser::ImplAlgoDesc AlgoChooser::AlgoChooserHelper::get_profile_result_from_cache( const ExecutionStrategy& selected_strategy) const { MIDOUT_B(Opr, midout_iv(MGB_HASH_STR("get_profile_result_from_cache"))) - AlgoChooserProfileCache cache(m_cn, profile_name(m_megdnn_opr).c_str()); + AlgoChooserProfileCache cache(m_cn, profile_name(m_dnn_opr).c_str()); - typename Opr::Param origin_param = m_megdnn_opr->param(); + typename Opr::Param origin_param = m_dnn_opr->param(); AlgoChooserProfileCache::Key cache_key{m_layouts.data(), m_layouts.size(), &origin_param, sizeof(origin_param)}; auto&& rst = cache.get(cache_key); @@ -504,7 +504,7 @@ void AlgoChooser::AlgoChooserHelper::construct_execution_policy( std::string layouts_str = format_fixlayouts( m_layouts, arity_in, arity_out); std::string msg = ssprintf( - "(mbg_opr : %s, layouts %s, with attribute(%s) and " + "(opr : %s, layouts %s, with attribute(%s) and " "without attribute(%s)", m_base_mgb_opr->dyn_typeinfo()->name, layouts_str.c_str(), @@ -526,7 +526,7 @@ void AlgoChooser::AlgoChooserHelper::construct_execution_policy( owner_graph(), m_cn, m_execution_policy.workspace_limit); auto attr = extract_algo_attribute(selected_strategy); - policy.algo = APPLY(m_megdnn_opr->get_algorithm_info_heuristic( + policy.algo = APPLY(m_dnn_opr->get_algorithm_info_heuristic( args..., workspace_limit, attr.first, attr.second), m_layouts) @@ -539,10 +539,10 @@ void AlgoChooser::AlgoChooserHelper::construct_execution_policy( } } - Algorithm* algo = m_megdnn_opr->get_algorithm_from_desc(policy.algo); + Algorithm* algo = m_dnn_opr->get_algorithm_from_desc(policy.algo); mgb_assert(algo, "Unknown algo description"); std::vector&& sub_items = algo->get_subopr_list( - to_layout_array(m_layouts), m_megdnn_opr); + to_layout_array(m_layouts), m_dnn_opr); FOREACH_OPR_TYPE_DISPATCH(sub_items, { auto&& megdnn_opr = intl::create_megdnn_opr<_Opr>(m_cn); @@ -571,11 +571,11 @@ template size_t AlgoChooser::AlgoChooserHelper::get_workspace_size_bytes( const ImplExecutionPolicy& policy) const { MIDOUT_B(Opr, midout_iv(MGB_HASH_STR("get_workspace_size_bytes"))) - m_megdnn_opr->execution_policy() = policy; + m_dnn_opr->execution_policy() = policy; size_t result; if_constexpr()>( [&](auto _) { - auto&& opr = _(m_megdnn_opr); + auto&& opr = _(m_dnn_opr); auto prep = this->construct_fake_preprocess_filter(); PreprocessFilter* prep_ptr = prep.valid() ? 
&prep.val() : nullptr; @@ -587,7 +587,7 @@ size_t AlgoChooser::AlgoChooserHelper::get_workspace_size_bytes( }, /* else */ [&](auto _) { - result = APPLY(_(m_megdnn_opr)->get_workspace_in_bytes(args...), + result = APPLY(_(m_dnn_opr)->get_workspace_in_bytes(args...), m_layouts); }); return result; @@ -600,7 +600,7 @@ AlgoChooser::AlgoChooserHelper::get_all_candidates() const { MIDOUT_B(Opr, midout_iv(MGB_HASH_STR("get_all_candidates"))) auto heu = choose_by_heuristic(m_execution_policy.strategy); auto&& ret = - APPLY(m_megdnn_opr->get_all_algorithms_info(args...), m_layouts); + APPLY(m_dnn_opr->get_all_algorithms_info(args...), m_layouts); bool found = false; for (size_t i = 0; i < ret.size(); ++i) { if (ret[i].desc == heu.algo) { @@ -610,7 +610,7 @@ AlgoChooser::AlgoChooserHelper::get_all_candidates() const { } } - Algorithm* palgo = m_megdnn_opr->get_algorithm_from_desc(heu.algo); + Algorithm* palgo = m_dnn_opr->get_algorithm_from_desc(heu.algo); mgb_assert(palgo, "Unknown algo description"); mgb_assert(found, "algo %s got by heuristic not found in " @@ -644,10 +644,10 @@ AlgoChooser::AlgoChooserHelper::profile_single_algo( mgb_assert(param.shapes.size() == m_layouts.size()); for (size_t i = 0; i < param.shapes.size(); ++i) param.shapes[i] = m_layouts[i]; - param.opr_param = m_megdnn_opr->param(); + param.opr_param = m_dnn_opr->param(); param.allow_weight_preprocess = m_allow_weight_preprocess; - Algorithm* palgo = m_megdnn_opr->get_algorithm_from_desc(policy.algo); + Algorithm* palgo = m_dnn_opr->get_algorithm_from_desc(policy.algo); mgb_assert(palgo, "can not find algo when profile single algo"); auto rst = TimedProfiler::profile(param, timeout); @@ -691,7 +691,7 @@ void AlgoChooser::AlgoChooserHelper::profile( policy.algo = algo.desc; //! check negative attribute : skip negative attribute - auto palgo = m_megdnn_opr->get_algorithm_from_desc(policy.algo); + auto palgo = m_dnn_opr->get_algorithm_from_desc(policy.algo); if (palgo->contain_attribute_any(target_attr.second)) { mgb_log_debug( "skip algo %s, which matches the profile strategy required " @@ -748,12 +748,12 @@ void AlgoChooser::AlgoChooserHelper::profile( mgb_assert(!prof_rst.empty(), "%s", msg.c_str()); FixedTensorLayouts origin_layouts = m_layouts; - typename Opr::Param origin_param = m_megdnn_opr->param(); + typename Opr::Param origin_param = m_dnn_opr->param(); AlgoChooserProfileCache::Key cache_key{origin_layouts.data(), origin_layouts.size(), &origin_param, sizeof(origin_param)}; - AlgoChooserProfileCache cache(m_cn, profile_name(m_megdnn_opr).c_str()); + AlgoChooserProfileCache cache(m_cn, profile_name(m_dnn_opr).c_str()); cache.put(cache_key, prof_rst); MIDOUT_E } @@ -766,7 +766,7 @@ AlgoChooser::AlgoChooserHelper::construct_fake_preprocess_filter() const { if_constexpr()>([&](auto _) { if (!m_allow_weight_preprocess) return; - auto opr = _(m_megdnn_opr); + auto opr = _(m_dnn_opr); auto layouts = APPLY(opr->deduce_preprocessed_filter_layout(args...), m_layouts); //! 
No preprocess layout means no need weight preprocess diff --git a/src/opr/impl/search_policy/profiler.cpp b/src/opr/impl/search_policy/profiler.cpp index 01d7dbd40..b462f7b19 100644 --- a/src/opr/impl/search_policy/profiler.cpp +++ b/src/opr/impl/search_policy/profiler.cpp @@ -312,10 +312,15 @@ typename TimedProfiler::TResult TimedProfiler::prof_impl( double next_report_time = 0.5; while (!ev_end->finished()) { if (timer.get_secs() >= next_report_time) { +#if MGB_ENABLE_GETENV mgb_log_warn( "profiling conv algo %s already took %.3f/%.3f secs" " (limit can be set by MGB_CONV_PROFILING_TIMEOUT) ", algo->name(), timer.get_secs(), param.actual_timeout); +#else + mgb_log_warn("profiling conv algo %s already took %.3f/%.3f secs", + algo->name(), timer.get_secs(), param.actual_timeout); +#endif next_report_time = timer.get_secs() + 1; } using namespace std::literals; diff --git a/src/opr/impl/tensor_gen.cpp b/src/opr/impl/tensor_gen.cpp index 53a409a06..c70a1f46e 100644 --- a/src/opr/impl/tensor_gen.cpp +++ b/src/opr/impl/tensor_gen.cpp @@ -111,7 +111,7 @@ void Linspace::scn_do_execute() { stop.dtype(), stop.raw_ptr()).get_cast(); auto cn = comp_node(); - auto &&opr = m_megdnn_opr; + auto &&opr = m_dnn_opr; if (!opr || opr.comp_node() != cn) opr = intl::create_megdnn_opr(cn); opr->param() = {startv, stopv, m_param.endpoint}; @@ -122,7 +122,7 @@ void Linspace::scn_do_execute() { void Linspace::record_execute_deps(ExecDependencyArray& deps) { deps.emplace_back( - std::make_unique(std::move(m_megdnn_opr))); + std::make_unique(std::move(m_dnn_opr))); } #if MGB_ENABLE_GRAD @@ -184,7 +184,7 @@ cg::OperatorNodeBase::NodeProp* Eye::do_make_node_prop() const { void Eye::scn_do_execute() { auto cn = comp_node(); - auto &&opr = m_megdnn_opr; + auto &&opr = m_dnn_opr; if (!opr || opr.comp_node() != cn) { opr = intl::create_megdnn_opr(cn); opr->param() = m_param; @@ -196,7 +196,7 @@ void Eye::scn_do_execute() { void Eye::record_execute_deps(ExecDependencyArray& deps) { deps.emplace_back( - std::make_unique(std::move(m_megdnn_opr))); + std::make_unique(std::move(m_dnn_opr))); } #if MGB_ENABLE_GRAD diff --git a/src/opr/include/megbrain/opr/indexing.h b/src/opr/include/megbrain/opr/indexing.h index 60c655e31..a7ebbe24b 100644 --- a/src/opr/include/megbrain/opr/indexing.h +++ b/src/opr/include/megbrain/opr/indexing.h @@ -88,7 +88,7 @@ namespace mixin { template class IndexingMultiAxisVecMegDNNOprHolder { - intl::UniqPtrWithCN m_megdnn_opr; + intl::UniqPtrWithCN m_dnn_opr; protected: Opr& megdnn_opr(cg::SingleCNOperatorNodeBase& self); diff --git a/src/opr/include/megbrain/opr/internal/megdnn_opr_wrapper.h b/src/opr/include/megbrain/opr/internal/megdnn_opr_wrapper.h index 916488ddc..71ea7b6f7 100644 --- a/src/opr/include/megbrain/opr/internal/megdnn_opr_wrapper.h +++ b/src/opr/include/megbrain/opr/internal/megdnn_opr_wrapper.h @@ -136,7 +136,7 @@ namespace mixin { virtual void create_megdnn_opr() = 0; megdnn::OperatorBase* megdnn_opr() const { - return m_megdnn_opr.get(); + return m_dnn_opr.get(); } void set_megdnn_opr(std::unique_ptr opr); @@ -146,7 +146,7 @@ namespace mixin { cg::GraphExecutable::ExecDependencyArray& deps); private: - std::unique_ptr m_megdnn_opr; + std::unique_ptr m_dnn_opr; }; class MegDNNOprHolderBwdStaticInfer: public MegDNNOprHolder { diff --git a/src/opr/include/megbrain/opr/rand.h b/src/opr/include/megbrain/opr/rand.h index b1508a583..bbf6f05e2 100644 --- a/src/opr/include/megbrain/opr/rand.h +++ b/src/opr/include/megbrain/opr/rand.h @@ -23,7 +23,7 @@ namespace opr { namespace intl { 
MGB_DEFINE_CLS_WITH_SUPER(RNGOprBase, cg::SingleCNOperatorNodeBase) // { - UniqPtrWithCN m_megdnn_opr; + UniqPtrWithCN m_dnn_opr; void ensure_megdnn_opr(); void init_output_static_infer_desc() override; diff --git a/src/opr/include/megbrain/opr/search_policy/algo_chooser.h b/src/opr/include/megbrain/opr/search_policy/algo_chooser.h index 60a7e6ff1..06dcad3cd 100644 --- a/src/opr/include/megbrain/opr/search_policy/algo_chooser.h +++ b/src/opr/include/megbrain/opr/search_policy/algo_chooser.h @@ -69,7 +69,7 @@ public: using FixedTensorLayouts = std::array; class AlgoChooserHelper { FixedTensorLayouts m_layouts; - Opr* m_megdnn_opr; + Opr* m_dnn_opr; std::string m_param; const cg::OperatorNodeBase* m_base_mgb_opr; CompNode m_cn; @@ -84,7 +84,7 @@ public: const megdnn::param::ExecutionPolicy& execution_policy, bool allow_weight_preprocess); - Opr* megdnn_opr() const { return m_megdnn_opr; } + Opr* megdnn_opr() const { return m_dnn_opr; } const cg::OperatorNodeBase* mgb_opr() const { return m_base_mgb_opr; } @@ -106,7 +106,7 @@ public: megdnn::Algorithm* get_algorithm_from_desc( const megdnn::Algorithm::Info::Desc& desc) const { - return m_megdnn_opr->get_algorithm_from_desc(desc); + return m_dnn_opr->get_algorithm_from_desc(desc); } const FixedTensorLayouts& layouts() const { return m_layouts; } diff --git a/src/opr/include/megbrain/opr/tensor_gen.h b/src/opr/include/megbrain/opr/tensor_gen.h index 75f0d7b35..28d6f8757 100644 --- a/src/opr/include/megbrain/opr/tensor_gen.h +++ b/src/opr/include/megbrain/opr/tensor_gen.h @@ -72,7 +72,7 @@ MGB_DEFINE_OPR_CLASS(Linspace, cg::SingleCNOperatorNodeBase) // { private: const Param m_param; - intl::UniqPtrWithCN m_megdnn_opr; + intl::UniqPtrWithCN m_dnn_opr; void scn_do_execute() override; void init_output_static_infer_desc() override; @@ -97,7 +97,7 @@ MGB_DEFINE_OPR_CLASS(Eye, cg::SingleCNOperatorNodeBase) // { private: const Param m_param; - intl::UniqPtrWithCN m_megdnn_opr; + intl::UniqPtrWithCN m_dnn_opr; void scn_do_execute() override; void init_output_static_infer_desc() override; diff --git a/src/plugin/impl/var_sanity_check.cpp b/src/plugin/impl/var_sanity_check.cpp index 87662c6ea..9bb4b00ba 100644 --- a/src/plugin/impl/var_sanity_check.cpp +++ b/src/plugin/impl/var_sanity_check.cpp @@ -279,6 +279,7 @@ void VarSanityCheck::check_single_input(bool add_debug_log, } if (checksum != checksum_expect) { +#if MGB_ENABLE_GETENV mgb_throw(Error, "var sanity check failed: var: %s" " (checksum: expect=%s got=%s); receiver: %s{%s}(%zu);" @@ -288,6 +289,15 @@ void VarSanityCheck::check_single_input(bool add_debug_log, str(checksum_expect).c_str(), str(checksum).c_str(), recv_opr->cname(), recv_opr->dyn_typeinfo()->name, recv_opr->id(), var->id(), !add_debug_log); +#else + mgb_throw(Error, + "var sanity check failed: var: %s" + " (checksum: expect=%s got=%s); receiver: %s{%s}(%zu);", + cg::dump_var_info({var}).c_str(), + str(checksum_expect).c_str(), str(checksum).c_str(), + recv_opr->cname(), recv_opr->dyn_typeinfo()->name, + recv_opr->id()); +#endif } } diff --git a/src/serialization/impl/extern_c_opr.cpp b/src/serialization/impl/extern_c_opr.cpp index cdc36e034..ec34ffaf8 100644 --- a/src/serialization/impl/extern_c_opr.cpp +++ b/src/serialization/impl/extern_c_opr.cpp @@ -292,7 +292,7 @@ ExternCOprRunner::ExternCOprRunner(std::string& name, auto size_diff = sizeof(MGBOprDesc) - m_desc->size; is_loader_support_dynamic_param = (0 == size_diff) ? 
true : false; mgb_assert(0 == size_diff || sizeof(ExternCOprParam*) == size_diff, - "invalid MGBOprDesc size: expect=%zu got=%u, may caused by " + "invalid OprDesc size: expect=%zu got=%u, may caused by " "extern_c_opr.h mismatch, please confirm that the " "extern_c_opr.h used when compiling the loader is consistent " "with the runtime caller build used", @@ -531,8 +531,8 @@ cg::OperatorNodeBase* ExternCOprRunner::shallow_copy( } MGBTensorShape ExternCOprRunner::tensor_shape_to_c(const TensorShape& shape) { - mgb_assert(shape.ndim <= MGB_TENSOR_MAX_NDIM, "shape ndim too large: %zu", - shape.ndim); + mgb_throw_if(shape.ndim > MGB_TENSOR_MAX_NDIM, MegBrainError, + "shape ndim too large: %zu", shape.ndim); MGBTensorShape ret; ret.ndim = shape.ndim; for (size_t i = 0; i < shape.ndim; ++i) { diff --git a/src/serialization/impl/opr_load_dump.cpp b/src/serialization/impl/opr_load_dump.cpp index 19aacb90d..7331ca519 100644 --- a/src/serialization/impl/opr_load_dump.cpp +++ b/src/serialization/impl/opr_load_dump.cpp @@ -41,7 +41,8 @@ DType OprLoadContextRawPOD::read_param() { if (m_check_param_tag) { uint32_t tag; read_raw(&tag, sizeof(tag)); - mgb_assert(tag == megdnn::param::FakeSerializedDType::TAG); + mgb_throw_if(tag != megdnn::param::FakeSerializedDType::TAG, + MegBrainError, "ERROR tag"); } return serialization::deserialize_dtype( [this](void* data, size_t len) { read_raw(data, len); }); -- GitLab
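---

For reference, a minimal standalone sketch of the runtime log-level override this patch introduces: the level is read from the `RUNTIME_OVERRIDE_LOG_LEVEL` environment variable with plain `::std::getenv` (so it still works when the `MGB_GETENV` wrapper is compiled out), and the numeric values mirror `mgb::LogLevel` (DEBUG=0, INFO=1, WARN=2, ERROR=3, NO_LOG=4). This is an illustration of the pattern used in `config_default_log_level()` / `set_log_level()`, not the MegBrain sources themselves; the helper name `resolve_log_level` is hypothetical.

```cpp
// Illustrative sketch of the env-var log-level override (not MegBrain code).
#include <cstdio>
#include <cstdlib>

// Mirrors the values documented in src/core/include/megbrain/common.h.
enum class LogLevel { DEBUG = 0, INFO = 1, WARN = 2, ERROR = 3, NO_LOG = 4 };

// Hypothetical helper: use std::getenv directly so the override is honored
// even in builds where MGB_ENABLE_GETENV is 0.
LogLevel resolve_log_level(LogLevel fallback) {
    if (const char* env = std::getenv("RUNTIME_OVERRIDE_LOG_LEVEL")) {
        return static_cast<LogLevel>(std::atoi(env));
    }
    return fallback;
}

int main() {
    // e.g. `export RUNTIME_OVERRIDE_LOG_LEVEL=0` before launching selects DEBUG.
    LogLevel level = resolve_log_level(LogLevel::ERROR);
    std::printf("effective log level: %d\n", static_cast<int>(level));
    return 0;
}
```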