Unverified commit b0556764, authored by Pei Yang, committed by GitHub

[Paddle-TRT] Add trt runtime version check (#32443)

* add trt runtime version check

* use different wrap, and change to major version check
Parent 541d702d
paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc
@@ -20,6 +20,7 @@
 #include "paddle/fluid/inference/analysis/helper.h"
 #include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
 #include "paddle/fluid/inference/tensorrt/engine.h"
+#include "paddle/fluid/inference/tensorrt/helper.h"
 #include "paddle/fluid/inference/tensorrt/op_teller.h"

 namespace paddle {
@@ -321,11 +322,20 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
     opt_input_shape = {};
   }
   if (min_input_shape.size() > 0 && TRT_VERSION > 6000) {
+    auto to_major_version = [&](int full_version) -> float {
+      return (full_version / 100) / 10.0;
+    };
+    const float compile_time_trt_version = to_major_version(TRT_VERSION);
+    const float run_time_trt_version =
+        to_major_version(tensorrt::GetInferLibVersion());
+    if (compile_time_trt_version != run_time_trt_version) {
     LOG_FIRST_N(WARNING, 1)
-        << "The Paddle lib links the " << TRT_VERSION << " version TensorRT, "
-        << "make sure the runtime TensorRT you are using is no less than this "
-           "version, otherwise, there might be Segfault!";
+        << "The Paddle Inference library is compiled with "
+        << compile_time_trt_version << " version TensorRT, "
+        << "but the runtime TensorRT you are using is " << run_time_trt_version
+        << " version. "
+           "This might cause serious compatibility issues. We strongly "
+           "recommend using the same TRT version at runtime.";
+    }
   // Setting the disable_trt_plugin_fp16 to true means that TRT plugin will not
......
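The heart of the check is the to_major_version lambda above. TensorRT of this era encodes its version as a single integer, MAJOR * 1000 + MINOR * 100 + PATCH (e.g. 7.1.3 becomes 7103), both in the compile-time TRT_VERSION macro and in the value getInferLibVersion() returns at runtime, so dividing by 100 and then by 10.0 keeps MAJOR.MINOR and drops the patch level. A minimal standalone sketch of that arithmetic (the ToMajorVersion name and the sample version values are illustrative, not Paddle code):

#include <cassert>

// Illustrative re-implementation of the to_major_version lambda:
// 7103 (= TensorRT 7.1.3) / 100 -> 71, then / 10.0 -> 7.1.
float ToMajorVersion(int full_version) {
  return (full_version / 100) / 10.0f;
}

int main() {
  // Patch-level skew is tolerated: 7.1.0 and 7.1.3 both map to 7.1.
  assert(ToMajorVersion(7100) == ToMajorVersion(7103));
  // A minor-version mismatch (7.1 vs 7.2) would trip the warning above.
  assert(ToMajorVersion(7103) != ToMajorVersion(7203));
  return 0;
}

Note that although the commit message says "major version check", the minor digit survives the division, so the comparison is effectively on MAJOR.MINOR.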
paddle/fluid/inference/tensorrt/helper.h
@@ -60,6 +60,9 @@ static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) {
 static nvinfer1::IPluginRegistry* GetPluginRegistry() {
   return static_cast<nvinfer1::IPluginRegistry*>(dy::getPluginRegistry());
 }
+static int GetInferLibVersion() {
+  return static_cast<int>(dy::getInferLibVersion());
+}
 #endif

 // A logger for create TensorRT infer builder.
......
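In helper.h, dy::getInferLibVersion is not a direct link-time call into libnvinfer: dy:: is Paddle's dynload layer, which opens the TensorRT shared library once at runtime and resolves each routine with dlsym. A minimal sketch of the same idea outside Paddle, assuming libnvinfer.so is on the loader search path (getInferLibVersion is the C-linkage symbol TensorRT exports; error handling is reduced to stderr messages):

#include <dlfcn.h>

#include <cstdio>

int main() {
  // Open the TensorRT runtime library; the exact soname
  // (e.g. libnvinfer.so.7) depends on the installation.
  void* handle = dlopen("libnvinfer.so", RTLD_LAZY);
  if (handle == nullptr) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  // Resolve the version query and call it through a correctly typed pointer.
  using GetVersionFn = int (*)();
  auto get_version =
      reinterpret_cast<GetVersionFn>(dlsym(handle, "getInferLibVersion"));
  if (get_version == nullptr) {
    std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
    dlclose(handle);
    return 1;
  }
  std::printf("runtime TensorRT version: %d\n", get_version());
  dlclose(handle);
  return 0;
}

Build with something like g++ demo.cc -ldl; the printed integer uses the same MAJOR * 1000 + MINOR * 100 + PATCH encoding discussed above.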
paddle/fluid/platform/dynload/tensorrt.cc
@@ -27,7 +27,8 @@ void* tensorrt_plugin_dso_handle;
 #define DEFINE_WRAP(__name) DynLoad__##__name __name

-TENSORRT_RAND_ROUTINE_EACH(DEFINE_WRAP);
+TENSORRT_RAND_ROUTINE_EACH_POINTER(DEFINE_WRAP);
+TENSORRT_RAND_ROUTINE_EACH_NON_POINTER(DEFINE_WRAP);
 TENSORRT_PLUGIN_RAND_ROUTINE_EACH(DEFINE_WRAP);

 void* GetDsoHandle(const std::string& dso_name) {
......
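The .cc change mirrors the header: every routine listed in a *_ROUTINE_EACH macro gets a DynLoad__<name> functor type declared in tensorrt.h, and DEFINE_WRAP instantiates the one global object per routine in this file. Hand-expanded for the newly added routine (a sketch of the expansion, not verbatim preprocessor output):

// TENSORRT_RAND_ROUTINE_EACH_NON_POINTER(DEFINE_WRAP); becomes
// DEFINE_WRAP(getInferLibVersion);, which in turn produces:
DynLoad__getInferLibVersion getInferLibVersion;

so code elsewhere can call dy::getInferLibVersion() as if it were the plain TensorRT function.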
paddle/fluid/platform/dynload/tensorrt.h
@@ -37,7 +37,7 @@ void* GetTensorRtPluginHandle();
 extern std::once_flag tensorrt_plugin_dso_flag;
 extern void* tensorrt_plugin_dso_handle;

-#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \
+#define DECLARE_DYNAMIC_LOAD_TENSORRT_POINTER_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
     void* operator()(Args... args) { \
@@ -55,6 +55,23 @@ extern void* tensorrt_plugin_dso_handle;
   }; \
   extern DynLoad__##__name __name

+#define DECLARE_DYNAMIC_LOAD_TENSORRT_NON_POINTER_WRAP(__name) \
+  struct DynLoad__##__name { \
+    template <typename... Args> \
+    auto operator()(Args... args) -> DECLARE_TYPE(__name, args...) { \
+      std::call_once(tensorrt_dso_flag, []() { \
+        tensorrt_dso_handle = paddle::platform::dynload::GetTensorRtHandle(); \
+      }); \
+      static void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \
+      PADDLE_ENFORCE_NOT_NULL(p_##__name, \
+                              platform::errors::Unavailable( \
+                                  "Load tensorrt api %s failed", #__name)); \
+      using tensorrt_func = decltype(&::__name); \
+      return reinterpret_cast<tensorrt_func>(p_##__name)(args...); \
+    } \
+  }; \
+  extern DynLoad__##__name __name
+
 #define DECLARE_DYNAMIC_LOAD_TENSORRT_PLUGIN_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
@@ -76,20 +93,25 @@ extern void* tensorrt_plugin_dso_handle;
 #ifdef NV_TENSORRT_MAJOR
 #if (NV_TENSORRT_MAJOR >= 6)
-#define TENSORRT_RAND_ROUTINE_EACH(__macro) \
+#define TENSORRT_RAND_ROUTINE_EACH_POINTER(__macro) \
   __macro(createInferBuilder_INTERNAL); \
   __macro(createInferRuntime_INTERNAL); \
   __macro(getPluginRegistry);
 #else
-#define TENSORRT_RAND_ROUTINE_EACH(__macro) \
+#define TENSORRT_RAND_ROUTINE_EACH_POINTER(__macro) \
   __macro(createInferBuilder_INTERNAL); \
   __macro(createInferRuntime_INTERNAL);
 #endif

+#define TENSORRT_RAND_ROUTINE_EACH_NON_POINTER(__macro) \
+  __macro(getInferLibVersion);
+
 #define TENSORRT_PLUGIN_RAND_ROUTINE_EACH(__macro) \
   __macro(initLibNvInferPlugins);

-TENSORRT_RAND_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP)
+TENSORRT_RAND_ROUTINE_EACH_POINTER(DECLARE_DYNAMIC_LOAD_TENSORRT_POINTER_WRAP)
+TENSORRT_RAND_ROUTINE_EACH_NON_POINTER(
+    DECLARE_DYNAMIC_LOAD_TENSORRT_NON_POINTER_WRAP)
 TENSORRT_PLUGIN_RAND_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_TENSORRT_PLUGIN_WRAP)

 #endif  // end of NV_TENSORRT_MAJOR
......
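Why a second wrap at all? The original (now _POINTER) wrap hard-codes void* as the call operator's return type, which is fine for factory routines like createInferBuilder_INTERNAL that hand back object pointers, but would mangle getInferLibVersion, which returns an int. The new _NON_POINTER wrap instead deduces the true return type from the routine's declaration. A self-contained sketch of that mechanism, with a stub standing in for the dlsym lookup and a plain decltype in place of Paddle's DECLARE_TYPE helper:

#include <iostream>

// Stub standing in for the real symbol dlsym would resolve from libnvinfer.
extern "C" int getInferLibVersionStub() { return 7103; }

// Non-pointer wrap: the trailing return type is deduced from the function's
// declaration, so the caller receives a properly typed int, not a void*.
struct DynLoad__getInferLibVersionStub {
  template <typename... Args>
  auto operator()(Args... args) -> decltype(getInferLibVersionStub(args...)) {
    // Paddle obtains this pointer via dlsym(tensorrt_dso_handle, #__name).
    void* p = reinterpret_cast<void*>(&getInferLibVersionStub);
    using Fn = decltype(&getInferLibVersionStub);
    return reinterpret_cast<Fn>(p)(args...);
  }
};

int main() {
  DynLoad__getInferLibVersionStub get_infer_lib_version;
  std::cout << get_infer_lib_version() << "\n";  // prints 7103, typed as int
  return 0;
}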