diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 452636e067f502a17babe713a9d4585233de0642..246cfc44e81dc4b7e2d556cceb7901f07bffb0b6 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -21,6 +21,7 @@ #include "paddle/fluid/inference/utils/table_printer.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/errors.h" #include "paddle/phi/backends/cpu/cpu_info.h" #include "paddle/utils/string/split.h" @@ -101,10 +102,10 @@ void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, precision_mode == Precision::kBf16) { enable_gpu_mixed_ = true; } else { - LOG(ERROR) - << "The Paddle-GPU inference currently only supports " "float32/float16/bfloat16 precision. Please check the parameters " "you specified in EnableUseGpu or enable_use_gpu function."; + PADDLE_THROW(platform::errors::InvalidArgument( "The Paddle-GPU inference currently only supports " "float32/float16/bfloat16 precision. Please check the parameters " "you specified in EnableUseGpu or enable_use_gpu function.")); } #else LOG(ERROR) << "Please use PaddlePaddle with GPU version."; @@ -696,7 +697,7 @@ void AnalysisConfig::EnableTensorRtEngine( AnalysisConfig::Precision precision_mode, bool use_static, bool use_calib_mode) { -#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +#ifdef PADDLE_WITH_TENSORRT if (!use_gpu()) { LOG(ERROR) << "To use TensorRT engine, please call EnableUseGpu() first"; return; @@ -712,8 +713,8 @@ void AnalysisConfig::EnableTensorRtEngine( Update(); #else - LOG(ERROR) - << "To use TensorRT engine, please compile inference lib with GPU first."; + PADDLE_THROW(platform::errors::PreconditionNotMet( "To use Paddle-TensorRT, please compile with TENSORRT first.")); #endif } @@ -1249,7 +1250,7 @@ std::string AnalysisConfig::Summary() { os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"}); if (use_gpu_) { os.InsertRow({"gpu_device_id", std::to_string(gpu_device_id_)}); - os.InsertRow({"enable_gpu_mixed_", std::to_string(enable_gpu_mixed_)}); + os.InsertRow({"enable_gpu_mixed", std::to_string(enable_gpu_mixed_)}); os.InsertRow({"memory_pool_init_size", std::to_string(memory_pool_init_size_mb_) + "MB"}); os.InsertRow( diff --git a/paddle/fluid/inference/api/api_tester.cc b/paddle/fluid/inference/api/api_tester.cc index cc6527a7e554e89fc9339a0f063c4c4a28c66c57..cc35cf431914cccd9639236e21c56d722d14c6f1 100644 --- a/paddle/fluid/inference/api/api_tester.cc +++ b/paddle/fluid/inference/api/api_tester.cc @@ -87,7 +87,9 @@ TEST(paddle_inference_api, UpdateDllFlag) { TEST(paddle_inference_api, AnalysisConfigCopyCtor) { AnalysisConfig cfg1; cfg1.EnableUseGpu(10); +#ifdef PADDLE_WITH_TENSORRT cfg1.EnableTensorRtEngine(); +#endif std::string delete_pass("skip_layernorm_fuse_pass"); cfg1.pass_builder()->DeletePass(delete_pass); AnalysisConfig cfg2(cfg1);