Unverified commit f935ca8a, authored by Wilber and committed by GitHub

[lite-xpu-subgraph] Fix xpu compile and test xpu ci. (#27932)
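In brief, judging from the hunks below: the change renames the Lite-subgraph XPU macro from PADDLE_WITH_XPU to LITE_SUBGRAPH_WITH_XPU (presumably to keep it distinct from Paddle's native XPU build flag), adds ${XPU_DEPS} to the lite_tensor_utils library, lets the inference test run on either CUDA or Lite-XPU builds, and exposes AnalysisConfig::EnableXpu to Python as enable_xpu.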

Parent 5d73bfdb
@@ -22,7 +22,7 @@ if(XPU_SDK_ROOT)
   set(LITE_WITH_XPU ON)
   include_directories("${XPU_SDK_ROOT}/XTDK/include")
   include_directories("${XPU_SDK_ROOT}/XTCL/include")
-  add_definitions(-DPADDLE_WITH_XPU)
+  add_definitions(-DLITE_SUBGRAPH_WITH_XPU)
   LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/shlib/")
   LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/runtime/shlib/")
 endif()
......
@@ -381,7 +381,7 @@ void AnalysisConfig::Update() {
   }
   if (use_xpu_) {
-#ifndef PADDLE_WITH_XPU
+#ifndef LITE_SUBGRAPH_WITH_XPU
     PADDLE_THROW(platform::errors::Unavailable(
         "You tried to use an XPU device, but Paddle was not compiled "
         "with XPU-runtime."));
......
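For context, a minimal sketch (not part of this patch) of how the guard above is hit: enabling XPU on a build where LITE_SUBGRAPH_WITH_XPU is undefined raises the Unavailable error once the config is updated. The header path and model paths below are assumptions for illustration, not taken from this diff.

// Minimal sketch, assuming Paddle's public C++ inference API.
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("model_dir/model", "model_dir/params");  // hypothetical paths
  config.EnableXpu(100);  // sets use_xpu_; the argument is the L3 workspace size
  // On a build without LITE_SUBGRAPH_WITH_XPU, creating the predictor runs
  // AnalysisConfig::Update() and throws the Unavailable error shown above.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}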
@@ -4,6 +4,6 @@ endif()
 cc_library(lite_op_teller SRCS op_teller.cc DEPS lite_full_static framework_proto device_context boost xxhash)
 cc_library(lite_engine SRCS engine.cc DEPS lite_full_static framework_proto ${XPU_DEPS})
-cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost device_context)
+cc_library(lite_tensor_utils SRCS tensor_utils.cc DEPS memcpy lite_full_static framework_proto boost device_context ${XPU_DEPS})
 cc_test(test_lite_engine SRCS test_engine.cc DEPS lite_engine protobuf framework_proto glog gtest analysis)
 cc_test(test_lite_tensor_utils SRCS test_tensor_utils.cc DEPS lite_engine lite_tensor_utils)
@@ -16,7 +16,7 @@
 #define LITE_WITH_CUDA 1
 #endif
-#ifdef PADDLE_WITH_XPU
+#ifdef LITE_SUBGRAPH_WITH_XPU
 #define LITE_WITH_XPU 1
 #endif
@@ -59,7 +59,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
       cfg.cpu_math_library_num_threads);
 #endif
-#ifdef PADDLE_WITH_XPU
+#ifdef LITE_SUBGRAPH_WITH_XPU
   lite_cxx_config.set_xpu_workspace_l3_size_per_thread(
       cfg.xpu_l3_workspace_size);
 #endif
......
@@ -26,7 +26,11 @@ namespace inference {
 TEST(AnalysisPredictor, use_gpu) {
   std::string model_dir = FLAGS_infer_model + "/" + "model";
   AnalysisConfig config;
+#if defined(PADDLE_WITH_CUDA)
   config.EnableUseGpu(100, 0);
+#elif defined(LITE_SUBGRAPH_WITH_XPU)
+  config.EnableXpu(100);
+#endif
   config.SetModel(model_dir + "/model", model_dir + "/params");
   config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
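With this change the test exercises whichever device the build provides: CUDA builds keep config.EnableUseGpu(100, 0), while Lite-subgraph XPU builds call config.EnableXpu(100) instead; judging by the pybind binding further below, the argument is the L3 workspace size.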
@@ -40,7 +44,7 @@ TEST(AnalysisPredictor, use_gpu) {
   std::vector<float> input(input_num, 1);
   PaddleTensor in;
-  in.shape = {1, 3, 318, 318};
+  in.shape = {batch, channel, height, width};
   in.data =
       PaddleBuf(static_cast<void*>(input.data()), input_num * sizeof(float));
   in.dtype = PaddleDType::FLOAT32;
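The named dimensions replace the hard-coded shape; their definitions are not shown in this diff, but to stay consistent with the values they replace they would presumably look like:

// Hypothetical definitions, inferred from the {1, 3, 318, 318} shape they
// replace; the actual patch defines them elsewhere in the test file.
constexpr int batch = 1;
constexpr int channel = 3;
constexpr int height = 318;
constexpr int width = 318;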
@@ -92,7 +96,7 @@ TEST(Predictor, use_gpu) {
   auto input_names = predictor->GetInputNames();
   auto input_t = predictor->GetInputHandle(input_names[0]);
-  input_t->Reshape({1, 3, 318, 318});
+  input_t->Reshape({batch, channel, height, width});
   input_t->CopyFromCpu(input.data());
   predictor->Run();
......
@@ -443,6 +443,8 @@ void BindAnalysisConfig(py::module *m) {
       .def("params_file", &AnalysisConfig::params_file)
       .def("enable_use_gpu", &AnalysisConfig::EnableUseGpu,
            py::arg("memory_pool_init_size_mb"), py::arg("device_id") = 0)
+      .def("enable_xpu", &AnalysisConfig::EnableXpu,
+           py::arg("l3_workspace_size"))
       .def("disable_gpu", &AnalysisConfig::DisableGpu)
       .def("use_gpu", &AnalysisConfig::use_gpu)
       .def("gpu_device_id", &AnalysisConfig::gpu_device_id)
......
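With this binding in place, Python users should be able to opt into the XPU path by calling config.enable_xpu(l3_workspace_size=...) on an AnalysisConfig before creating a predictor, mirroring the C++ EnableXpu call used in the test above.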