diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index 0031757467f3784c5fe72182f7aaf06b90acd6c2..206cbf1d8f8d18d30d4008f13b60e0ee5d1ad85a 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -106,7 +106,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   else()
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
     set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
-                           -DLITE_WITH_CUDA=${WITH_GPU}
+                           -DLITE_WITH_CUDA=OFF
                            -DWITH_MKLDNN=OFF
                            -DLITE_WITH_X86=ON
                            -DLITE_WITH_PROFILE=OFF
@@ -115,9 +115,6 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
                            -DWITH_PYTHON=OFF
                            -DWITH_TESTING=OFF
                            -DLITE_BUILD_EXTRA=ON
-                           -DCUDNN_ROOT=${CUDNN_ROOT}
-                           -DLITE_WITH_STATIC_CUDA=OFF
-                           -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME}
                            -DLITE_WITH_XPU=${LITE_WITH_XPU}
                            -DXPU_SDK_URL=${XPU_BASE_URL}
                            -DXPU_SDK_ENV=${XPU_SDK_ENV}
diff --git a/paddle/fluid/inference/lite/engine.cc b/paddle/fluid/inference/lite/engine.cc
index cd78cfecd863577e61bd90e70cecfcc3e562f1da..c43e5c39bfeeb59947506b98cb2abb83c45e6ea8 100644
--- a/paddle/fluid/inference/lite/engine.cc
+++ b/paddle/fluid/inference/lite/engine.cc
@@ -12,10 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-#define LITE_WITH_CUDA 1
-#endif
-
 #ifdef LITE_SUBGRAPH_WITH_XPU
 #define LITE_WITH_XPU 1
 #endif
diff --git a/paddle/fluid/inference/lite/test_tensor_utils.cc b/paddle/fluid/inference/lite/test_tensor_utils.cc
index b0c7c7448a50ef11b4c457c8f4c55196ceb0d453..4e240ef9e40b3f72232dbfa9ca019c8e16e4bd62 100644
--- a/paddle/fluid/inference/lite/test_tensor_utils.cc
+++ b/paddle/fluid/inference/lite/test_tensor_utils.cc
@@ -151,22 +151,12 @@ TEST(LiteEngineOp, TensorCopyAsync) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_copy(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_copy(*ctx_gpu);
-#endif
 }
 
 TEST(LiteEngineOp, TensorShare) {
   auto* ctx_cpu =
       platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
   test_tensor_share(*ctx_cpu);
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  auto* ctx_gpu =
-      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
-  test_tensor_share(*ctx_gpu);
-#endif
 }
 
 }  // namespace utils
diff --git a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
index 9211ea246a5c5e0cdc75e6fef72ae0e4e40d69af..6aaad4d4545d694077e676277e022afa0bae5ccb 100644
--- a/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_mul_model_test.cc
@@ -119,35 +119,13 @@ TEST(AnalysisPredictor, lite_xpu) {
 }
 #endif
 
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-TEST(AnalysisPredictor, thread_local_stream) {
-  const size_t thread_num = 5;
-  std::vector<std::thread> threads(thread_num);
-  Barrier barrier(thread_num);
-  for (size_t i = 0; i < threads.size(); ++i) {
-    threads[i] = std::thread([&barrier, i]() {
-      AnalysisConfig config;
-      config.EnableUseGpu(100, 0);
-      config.SetModel(FLAGS_infer_model + "/" + "mul_model");
-      config.EnableGpuMultiStream();
-      test_predictor(config, &barrier);
-      test_predictor_zero_copy(config);
-    });
-  }
-  for (auto& th : threads) {
-    th.join();
-  }
-}
-
 TEST(AnalysisPredictor, lite_engine) {
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_infer_model + "/" + "mul_model");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
   test_predictor(config);
   test_predictor_zero_copy(config);
 }
-#endif
 
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
index 59bbaa2b78fb00428aa61bde3c65998721e757f3..e585257e485626e3b149e5b1341e19cb5c098e25 100644
--- a/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
+++ b/paddle/fluid/inference/tests/api/lite_resnet50_test.cc
@@ -22,10 +22,9 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-TEST(AnalysisPredictor, use_gpu) {
+TEST(AnalysisPredictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   AnalysisConfig config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
 
@@ -73,10 +72,9 @@ TEST(AnalysisPredictor, use_gpu) {
 
 namespace paddle_infer {
 
-TEST(Predictor, use_gpu) {
+TEST(Predictor, use_cpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   Config config;
-  config.EnableUseGpu(100, 0);
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(PrecisionType::kFloat32);