From 6296674540b4f0496703ff24f3142d0f83ecf2cd Mon Sep 17 00:00:00 2001
From: Jiawei Wang
Date: Tue, 26 Oct 2021 13:20:22 +0800
Subject: [PATCH] Merge pull request #1436 from ShiningZhang/dev

support set xpu id
---
 paddle_inference/paddle/include/paddle_engine.h | 1 +
 python/paddle_serving_app/local_predict.py      | 1 +
 python/pipeline/local_service_handler.py        | 4 ++++
 3 files changed, 6 insertions(+)

diff --git a/paddle_inference/paddle/include/paddle_engine.h b/paddle_inference/paddle/include/paddle_engine.h
index 7cc8120f..c76147b6 100644
--- a/paddle_inference/paddle/include/paddle_engine.h
+++ b/paddle_inference/paddle/include/paddle_engine.h
@@ -266,6 +266,7 @@ class PaddleInferenceEngine : public EngineCore {
     if (engine_conf.has_use_xpu() && engine_conf.use_xpu()) {
       // 2 MB l3 cache
       config.EnableXpu(2 * 1024 * 1024);
+      config.SetXpuDeviceId(gpu_id);
     }
 
     if (engine_conf.has_enable_memory_optimization() &&
diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py
index afe4ba62..7de41953 100644
--- a/python/paddle_serving_app/local_predict.py
+++ b/python/paddle_serving_app/local_predict.py
@@ -219,6 +219,7 @@ class LocalPredictor(object):
         if use_xpu:
             # 2MB l3 cache
             config.enable_xpu(8 * 1024 * 1024)
+            config.set_xpu_device_id(gpu_id)
         # set cpu low precision
         if not use_gpu and not use_lite:
             if precision_type == paddle_infer.PrecisionType.Int8:
diff --git a/python/pipeline/local_service_handler.py b/python/pipeline/local_service_handler.py
index d04b9654..d9df5e30 100644
--- a/python/pipeline/local_service_handler.py
+++ b/python/pipeline/local_service_handler.py
@@ -280,6 +280,10 @@ class LocalServiceHandler(object):
             server.set_gpuid(gpuid)
         # TODO: support arm or arm + xpu later
         server.set_device(self._device_name)
+        if self._use_xpu:
+            server.set_xpu()
+        if self._use_lite:
+            server.set_lite()
 
         server.set_op_sequence(op_seq_maker.get_op_sequence())
         server.set_num_threads(thread_num)
-- 
GitLab
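
Note (not part of the patch): the change threads the configured gpu_id through to XPU device selection in both the C++ engine and the Python LocalPredictor, so multi-card XPU hosts are no longer pinned to the default card 0. As a rough sketch of the same two calls when building a Paddle Inference config by hand, with placeholder model paths and device id:

    import paddle.inference as paddle_infer

    # Placeholder model files; substitute your own exported inference model.
    config = paddle_infer.Config("model/inference.pdmodel",
                                 "model/inference.pdiparams")
    config.enable_xpu(2 * 1024 * 1024)  # reserve a 2 MB L3 cache, as the engine does
    config.set_xpu_device_id(1)         # run on XPU card 1 instead of the default card 0
    predictor = paddle_infer.create_predictor(config)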