diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index b994f407604b42785e9776d610142d52194b6d67..1d5dd6ae8f425872d691f20b3617f75340bfdb52 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -115,7 +115,7 @@ if(NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
     set(LITE_OPTIONAL_ARGS
         -DWITH_MKL=ON
-        -DLITE_WITH_CUDA=${WITH_GPU}
+        -DLITE_WITH_CUDA=OFF
         -DWITH_MKLDNN=OFF
         -DLITE_WITH_X86=ON
         -DLITE_WITH_PROFILE=OFF
@@ -124,9 +124,6 @@ if(NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
         -DWITH_PYTHON=OFF
         -DWITH_TESTING=OFF
         -DLITE_BUILD_EXTRA=ON
-        -DCUDNN_ROOT=${CUDNN_ROOT}
-        -DLITE_WITH_STATIC_CUDA=OFF
-        -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME}
         -DLITE_WITH_XPU=${LITE_WITH_XPU}
         -DXPU_SDK_URL=${XPU_BASE_URL}
         -DXPU_SDK_ENV=${XPU_SDK_ENV}
diff --git a/paddle/fluid/inference/lite/engine.cc b/paddle/fluid/inference/lite/engine.cc
index 8f8f68b170b62ceab743e824fd157d1f74e91da9..615a90cdf5798fb9143b52cbcfbddbd35547e116 100644
--- a/paddle/fluid/inference/lite/engine.cc
+++ b/paddle/fluid/inference/lite/engine.cc
@@ -12,10 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-#define LITE_WITH_CUDA 1
-#endif
-
 #ifdef LITE_SUBGRAPH_WITH_XPU
 #define LITE_WITH_XPU 1
 #endif
diff --git a/paddle/fluid/inference/lite/test_tensor_utils.cc b/paddle/fluid/inference/lite/test_tensor_utils.cc
index 09a6cda62b3528874b02bafbd859f65a051fd6c3..43e1d8770c37c3429646747b4c5721214a5a4440 100644
--- a/paddle/fluid/inference/lite/test_tensor_utils.cc
+++ b/paddle/fluid/inference/lite/test_tensor_utils.cc
@@ -152,22 +152,12 @@ TEST(LiteEngineOp, TensorCopyAsync) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_copy(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_copy(*ctx_gpu);
-#endif
 }
 
 TEST(LiteEngineOp, TensorShare) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_share(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_share(*ctx_gpu);
-#endif
 }
 
 }  // namespace utils
diff --git a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
index 1adbf0ec7a552dc418d4ea4fc875f3f95d938278..1677d00ac1f44757da1754a7b5b77b66cbadb34c 100644
--- a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
@@ -120,35 +120,13 @@ TEST(AnalysisPredictor, lite_xpu) {
 }
 #endif
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-TEST(AnalysisPredictor, thread_local_stream) {
-  const size_t thread_num = 5;
-  std::vector<std::thread> threads(thread_num);
-  Barrier barrier(thread_num);
-  for (size_t i = 0; i < threads.size(); ++i) {
-    threads[i] = std::thread([&barrier, i]() {
-      AnalysisConfig config;
-      config.EnableUseGpu(100, 0);
-      config.SetModel(FLAGS_infer_model + "/" + "mul_model");
-      config.EnableGpuMultiStream();
-      test_predictor(config, &barrier);
-      test_predictor_zero_copy(config);
-    });
-  }
-  for (auto& th : threads) {
-    th.join();
-  }
-}
-
 TEST(AnalysisPredictor, lite_engine) {
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_infer_model + "/" + "mul_model");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
   test_predictor(config);
   test_predictor_zero_copy(config);
 }
-#endif
 
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
index 169d0b9987d793f048eedca2f3a885f772afa7db..b519a7f9b6ceaa297fcb7cc9a0660ccfa29b0f65 100644
--- a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
@@ -23,10 +23,9 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-TEST(AnalysisPredictor, use_gpu) {
+TEST(AnalysisPredictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
 
@@ -74,10 +73,9 @@ TEST(AnalysisPredictor, use_gpu) {
 
 namespace paddle_infer {
 
-TEST(Predictor, use_gpu) {
+TEST(Predictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   Config config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(PrecisionType::kFloat32);