From 36980306b00cc4db6b043348be27b534085db5cf Mon Sep 17 00:00:00 2001
From: zhupengyang
Date: Thu, 9 Jun 2022 14:52:32 +0800
Subject: [PATCH] disable lite gpu (#43178)

---
 cmake/external/lite.cmake                     |  5 +----
 paddle/fluid/inference/lite/engine.cc         |  4 ----
 .../fluid/inference/lite/test_tensor_utils.cc | 10 ---------
 .../tests/api/lite_mul_model_test.cc          | 22 -------------------
 .../inference/tests/api/lite_resnet50_test.cc |  6 ++---
 5 files changed, 3 insertions(+), 44 deletions(-)

diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index 0031757467f..206cbf1d8f8 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -106,7 +106,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   else()
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
     set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
-                           -DLITE_WITH_CUDA=${WITH_GPU}
+                           -DLITE_WITH_CUDA=OFF
                            -DWITH_MKLDNN=OFF
                            -DLITE_WITH_X86=ON
                            -DLITE_WITH_PROFILE=OFF
@@ -115,9 +115,6 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
                            -DWITH_PYTHON=OFF
                            -DWITH_TESTING=OFF
                            -DLITE_BUILD_EXTRA=ON
-                           -DCUDNN_ROOT=${CUDNN_ROOT}
-                           -DLITE_WITH_STATIC_CUDA=OFF
-                           -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME}
                            -DLITE_WITH_XPU=${LITE_WITH_XPU}
                            -DXPU_SDK_URL=${XPU_BASE_URL}
                            -DXPU_SDK_ENV=${XPU_SDK_ENV}
diff --git a/paddle/fluid/inference/lite/engine.cc b/paddle/fluid/inference/lite/engine.cc
index cd78cfecd86..c43e5c39bfe 100644
--- a/paddle/fluid/inference/lite/engine.cc
+++ b/paddle/fluid/inference/lite/engine.cc
@@ -12,10 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-#define LITE_WITH_CUDA 1
-#endif
-
 #ifdef LITE_SUBGRAPH_WITH_XPU
 #define LITE_WITH_XPU 1
 #endif
diff --git a/paddle/fluid/inference/lite/test_tensor_utils.cc b/paddle/fluid/inference/lite/test_tensor_utils.cc
index b0c7c7448a5..4e240ef9e40 100644
--- a/paddle/fluid/inference/lite/test_tensor_utils.cc
+++ b/paddle/fluid/inference/lite/test_tensor_utils.cc
@@ -151,22 +151,12 @@ TEST(LiteEngineOp, TensorCopyAsync) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_copy(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_copy(*ctx_gpu);
-#endif
 }
 
 TEST(LiteEngineOp, TensorShare) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_share(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_share(*ctx_gpu);
-#endif
 }
 
 }  // namespace utils
diff --git a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
index 9211ea246a5..6aaad4d4545 100644
--- a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
@@ -119,35 +119,13 @@ TEST(AnalysisPredictor, lite_xpu) {
 }
 #endif
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-TEST(AnalysisPredictor, thread_local_stream) {
-  const size_t thread_num = 5;
-  std::vector<std::thread> threads(thread_num);
-  Barrier barrier(thread_num);
-  for (size_t i = 0; i < threads.size(); ++i) {
-    threads[i] = std::thread([&barrier, i]() {
-      AnalysisConfig config;
-      config.EnableUseGpu(100, 0);
-      config.SetModel(FLAGS_infer_model + "/" + "mul_model");
-      config.EnableGpuMultiStream();
-      test_predictor(config, &barrier);
-      test_predictor_zero_copy(config);
-    });
-  }
-  for (auto& th : threads) {
-    th.join();
-  }
-}
-
 TEST(AnalysisPredictor, lite_engine) {
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_infer_model + "/" + "mul_model");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
   test_predictor(config);
   test_predictor_zero_copy(config);
 }
-#endif
 
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
index 59bbaa2b78f..e585257e485 100644
--- a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
@@ -22,10 +22,9 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-TEST(AnalysisPredictor, use_gpu) {
+TEST(AnalysisPredictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
 
@@ -73,10 +72,9 @@
 
 namespace paddle_infer {
 
-TEST(Predictor, use_gpu) {
+TEST(Predictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   Config config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(PrecisionType::kFloat32);
 
-- 
GitLab