Commit 62966745 authored by Jiawei Wang, committed by bjjwwang

Merge pull request #1436 from ShiningZhang/dev

Support setting the XPU device id
Parent fe8f5038
@@ -266,6 +266,7 @@ class PaddleInferenceEngine : public EngineCore {
    if (engine_conf.has_use_xpu() && engine_conf.use_xpu()) {
      // 2 MB l3 cache
      config.EnableXpu(2 * 1024 * 1024);
      config.SetXpuDeviceId(gpu_id);
    }
    if (engine_conf.has_enable_memory_optimization() &&
@@ -219,6 +219,7 @@ class LocalPredictor(object):
        if use_xpu:
            # 8 MB l3 cache
            config.enable_xpu(8 * 1024 * 1024)
            config.set_xpu_device_id(gpu_id)
        # set cpu low precision
        if not use_gpu and not use_lite:
            if precision_type == paddle_infer.PrecisionType.Int8:
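For context, the Python change above maps onto the standard paddle.inference workflow. Below is a minimal sketch, assuming an XPU-enabled Paddle build; the model file names and the device id are placeholders, not values from this commit:

import paddle.inference as paddle_infer

# Placeholder model files; any Paddle inference model works here.
config = paddle_infer.Config("model.pdmodel", "model.pdiparams")
# Reserve L3 cache on the XPU (size in bytes) ...
config.enable_xpu(8 * 1024 * 1024)
# ... and pin this predictor to a specific card, which is what the new
# set_xpu_device_id call makes possible (0 is a placeholder id).
config.set_xpu_device_id(0)
predictor = paddle_infer.create_predictor(config)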
@@ -280,6 +280,10 @@ class LocalServiceHandler(object):
        server.set_gpuid(gpuid)
        # TODO: support arm or arm + xpu later
        server.set_device(self._device_name)
        if self._use_xpu:
            server.set_xpu()
        if self._use_lite:
            server.set_lite()
        server.set_op_sequence(op_seq_maker.get_op_sequence())
        server.set_num_threads(thread_num)
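The LocalServiceHandler change wires the same XPU switch into the server builder. A hedged sketch of the resulting call order is shown below; it assumes the paddle_serving_server package layout (OpMaker, OpSeqMaker, Server), and the device name, model directory, port, and ids are placeholders:

from paddle_serving_server import OpMaker, OpSeqMaker, Server

# Standard reader -> infer -> response op pipeline.
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(OpMaker().create('general_reader'))
op_seq_maker.add_op(OpMaker().create('general_infer'))
op_seq_maker.add_op(OpMaker().create('general_response'))

server = Server()
server.set_device("arm")   # device name for the Lite/XPU path (assumption)
server.set_gpuid(0)        # reused as the XPU card id, as in the diff
server.set_xpu()           # switch added by this commit
server.set_lite()          # XPU is typically used together with the Lite engine
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(4)
server.load_model_config("serving_server_model")  # placeholder model dir
server.prepare_server(workdir="workdir", port=9393, device="arm")
server.run_server()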