From fae406aebd9d3646af77ad7139103c7db1b7d8bb Mon Sep 17 00:00:00 2001
From: Wilber
Date: Tue, 29 Dec 2020 15:12:38 +0800
Subject: [PATCH] [Inference] FLAGS_call_stack_level is turned on by default when ON_INFER=ON (#29800)

* [Inference] FLAGS_call_stack_level is turned on by default when ON_INFER=ON

* cherry-pick 29828
---
 .../analysis/passes/memory_optimize_pass.cc  |  2 ++
 .../inference/api/demo_ci/CMakeLists.txt     | 36 ++++++++++++++-----
 paddle/fluid/platform/flags.cc               |  8 ++++-
 paddle/fluid/platform/port.h                 |  1 +
 4 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
index defa0a525f6..5132b3b5e72 100644
--- a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
+++ b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
@@ -96,6 +96,7 @@ void MemoryOptimizePass::CollectVarMemorySize(
   const int fake_batch_size = 1;
 
   auto valid_var = [&](framework::ir::Node* node) -> bool {
+    // lod operator reuse may cause unknown errors.
     std::set<std::string> invalid_op = {"while",
                                         "conditional_block",
                                         "tensorrt_engine",
@@ -103,6 +104,7 @@ void MemoryOptimizePass::CollectVarMemorySize(
                                         "merge_lod_tensor_infer",
                                         "merge_lod_tensor",
                                         "equal",
+                                        "sequence_pool",
                                         "lod_reset"};
     for (auto* tmp : node->inputs) {
       CHECK(tmp->IsOp());
diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
index b7e8f40e408..a09f5776c71 100644
--- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
+++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt
@@ -83,14 +83,24 @@ if (USE_TENSORRT AND WITH_GPU)
   endif()
   set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
   set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
-endif()
-
-if (NOT WIN32)
-  if (USE_TENSORRT AND WITH_GPU)
-    include_directories("${TENSORRT_INCLUDE_DIR}")
-    link_directories("${TENSORRT_LIB_DIR}")
+  file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+  string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+    "${TENSORRT_VERSION_FILE_CONTENTS}")
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
   endif()
-endif(NOT WIN32)
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    message(SEND_ERROR "Failed to detect TensorRT version.")
+  endif()
+  string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+    TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+  message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+  include_directories("${TENSORRT_INCLUDE_DIR}")
+  link_directories("${TENSORRT_LIB_DIR}")
+endif()
 
 if(WITH_MKL)
   set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
@@ -147,14 +157,17 @@ endif(NOT WIN32)
 if(WITH_GPU)
   if(NOT WIN32)
     if (USE_TENSORRT)
-      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
-      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
     endif()
     set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
   else()
     if(USE_TENSORRT)
       set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
       set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
+      if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+        set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
+      endif()
     endif()
     set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
     set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
@@ -172,6 +185,11 @@ if(WIN32)
       COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
       ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
     )
+    if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+      add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
+        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
+    endif()
   endif()
   if(WITH_MKL)
     add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
diff --git a/paddle/fluid/platform/flags.cc b/paddle/fluid/platform/flags.cc
index 378071964fc..20be80b1761 100644
--- a/paddle/fluid/platform/flags.cc
+++ b/paddle/fluid/platform/flags.cc
@@ -498,8 +498,14 @@ DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run");
  * If FLAGS_call_stack_level == 2, the python stack, c++ stack, and error
  * message summary will be shown.
  */
+#ifdef PADDLE_ON_INFERENCE
+static const int32_t kDefaultCallStackLevel = 2;
+#else
+static const int32_t kDefaultCallStackLevel = 1;
+#endif
+
 DEFINE_int32(
-    call_stack_level, 1,
+    call_stack_level, kDefaultCallStackLevel,
     "Determine the call stack to print when error or exeception happens."
     // TODO(zhiqiu): implement logic of FLAGS_call_stack_level==0
     // "If FLAGS_call_stack_level == 0, only the error message summary will be "
diff --git a/paddle/fluid/platform/port.h b/paddle/fluid/platform/port.h
index b2f26ba9581..453bea625b0 100644
--- a/paddle/fluid/platform/port.h
+++ b/paddle/fluid/platform/port.h
@@ -47,6 +47,7 @@ static void *dlsym(void *handle, const char *symbol_name) {
   found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
 
   if (found_symbol == NULL) {
+    LOG(ERROR) << "Load symbol " << symbol_name << " failed.";
     throw std::runtime_error(std::string(symbol_name) + " not found.");
   }
   return reinterpret_cast<void *>(found_symbol);
-- 
GitLab