Unverified commit 811d57d8, authored by zhupengyang, committed by GitHub

disable lite gpu (#43177)

Parent 971e4791
......@@ -115,7 +115,7 @@ if(NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
set(LITE_OPTIONAL_ARGS
-DWITH_MKL=ON
- -DLITE_WITH_CUDA=${WITH_GPU}
+ -DLITE_WITH_CUDA=OFF
-DWITH_MKLDNN=OFF
-DLITE_WITH_X86=ON
-DLITE_WITH_PROFILE=OFF
......@@ -124,9 +124,6 @@ if(NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
-DWITH_PYTHON=OFF
-DWITH_TESTING=OFF
-DLITE_BUILD_EXTRA=ON
- -DCUDNN_ROOT=${CUDNN_ROOT}
- -DLITE_WITH_STATIC_CUDA=OFF
- -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME}
-DLITE_WITH_XPU=${LITE_WITH_XPU}
-DXPU_SDK_URL=${XPU_BASE_URL}
-DXPU_SDK_ENV=${XPU_SDK_ENV}
......
......@@ -12,10 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- #define LITE_WITH_CUDA 1
- #endif
#ifdef LITE_SUBGRAPH_WITH_XPU
#define LITE_WITH_XPU 1
#endif
......
......@@ -152,22 +152,12 @@ TEST(LiteEngineOp, TensorCopyAsync) {
auto* ctx_cpu =
platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
test_tensor_copy(*ctx_cpu);
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- auto* ctx_gpu =
-     platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
- test_tensor_copy(*ctx_gpu);
- #endif
}
TEST(LiteEngineOp, TensorShare) {
auto* ctx_cpu =
platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
test_tensor_share(*ctx_cpu);
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- auto* ctx_gpu =
-     platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
- test_tensor_share(*ctx_gpu);
- #endif
}
} // namespace utils
......
......@@ -120,35 +120,13 @@ TEST(AnalysisPredictor, lite_xpu) {
}
#endif
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- TEST(AnalysisPredictor, thread_local_stream) {
-   const size_t thread_num = 5;
-   std::vector<std::thread> threads(thread_num);
-   Barrier barrier(thread_num);
-   for (size_t i = 0; i < threads.size(); ++i) {
-     threads[i] = std::thread([&barrier, i]() {
-       AnalysisConfig config;
-       config.EnableUseGpu(100, 0);
-       config.SetModel(FLAGS_infer_model + "/" + "mul_model");
-       config.EnableGpuMultiStream();
-       test_predictor(config, &barrier);
-       test_predictor_zero_copy(config);
-     });
-   }
-   for (auto& th : threads) {
-     th.join();
-   }
- }
- TEST(AnalysisPredictor, lite_engine) {
-   AnalysisConfig config;
-   config.EnableUseGpu(100, 0);
-   config.SetModel(FLAGS_infer_model + "/" + "mul_model");
-   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
-   test_predictor(config);
-   test_predictor_zero_copy(config);
- }
- #endif
} // namespace inference
} // namespace paddle
......@@ -23,10 +23,9 @@ limitations under the License. */
namespace paddle {
namespace inference {
- TEST(AnalysisPredictor, use_gpu) {
+ TEST(AnalysisPredictor, use_cpu) {
std::string model_dir = FLAGS_infer_model + "/" + "model";
AnalysisConfig config;
- config.EnableUseGpu(100, 0);
config.SetModel(model_dir + "/model", model_dir + "/params");
config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
......@@ -74,10 +73,9 @@ TEST(AnalysisPredictor, use_gpu) {
namespace paddle_infer {
- TEST(Predictor, use_gpu) {
+ TEST(Predictor, use_cpu) {
std::string model_dir = FLAGS_infer_model + "/" + "model";
Config config;
- config.EnableUseGpu(100, 0);
config.SetModel(model_dir + "/model", model_dir + "/params");
config.EnableLiteEngine(PrecisionType::kFloat32);
......
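With Lite's CUDA backend hard-disabled, the Lite subgraph engine is now configured and tested on CPU only, as the updated use_cpu tests above show. Below is a minimal sketch of a CPU-only Lite-engine predictor using the paddle_infer API from the diff (Config, SetModel, EnableLiteEngine). The include path, model directory, input shape, and output handling are illustrative assumptions, not part of this commit.

```cpp
#include <iostream>
#include <string>
#include <vector>

#include "paddle_inference_api.h"  // assumed header name for the paddle_infer API

int main() {
  // CPU-only configuration: no EnableUseGpu call, mirroring the updated use_cpu tests.
  paddle_infer::Config config;
  std::string model_dir = "./model";  // hypothetical model location
  config.SetModel(model_dir + "/model", model_dir + "/params");
  // Offload supported subgraphs to Paddle-Lite at FP32 precision.
  config.EnableLiteEngine(paddle_infer::PrecisionType::kFloat32);

  auto predictor = paddle_infer::CreatePredictor(config);

  // Feed a small example input (shape chosen arbitrarily for illustration).
  auto input_names = predictor->GetInputNames();
  auto input = predictor->GetInputHandle(input_names[0]);
  std::vector<int> shape{1, 2};
  std::vector<float> data{1.0f, 2.0f};
  input->Reshape(shape);
  input->CopyFromCpu(data.data());

  predictor->Run();

  // Copy the first output back to host memory and print one element.
  auto output_names = predictor->GetOutputNames();
  auto output = predictor->GetOutputHandle(output_names[0]);
  std::vector<int> out_shape = output->shape();
  int numel = 1;
  for (int d : out_shape) numel *= d;
  std::vector<float> out_data(numel);
  output->CopyToCpu(out_data.data());

  std::cout << "first output element: " << out_data[0] << std::endl;
  return 0;
}
```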