Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
f46311b0
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
f46311b0
编写于
10月 22, 2021
作者:
W
Wilber
提交者:
GitHub
10月 22, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support lite xpu choose device id (#36610)
上级
ff06df6d
变更
6
隐藏空白更改
内联
并排
Showing
6 changed files
with
9 additions
and
0 deletions
+9
-0
paddle/fluid/inference/analysis/argument.h
paddle/fluid/inference/analysis/argument.h
+1
-0
paddle/fluid/inference/analysis/ir_pass_manager.cc
paddle/fluid/inference/analysis/ir_pass_manager.cc
+1
-0
paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
.../fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
+2
-0
paddle/fluid/inference/api/analysis_predictor.cc
paddle/fluid/inference/api/analysis_predictor.cc
+1
-0
paddle/fluid/inference/lite/engine.cc
paddle/fluid/inference/lite/engine.cc
+1
-0
paddle/fluid/inference/lite/engine.h
paddle/fluid/inference/lite/engine.h
+3
-0
未找到文件。
paddle/fluid/inference/analysis/argument.h
浏览文件 @
f46311b0
...
...
@@ -238,6 +238,7 @@ struct Argument {
DECL_ARGUMENT_FIELD
(
xpu_autotune_file
,
XpuAutotuneFile
,
std
::
string
);
DECL_ARGUMENT_FIELD
(
xpu_precision
,
XpuPrecision
,
std
::
string
);
DECL_ARGUMENT_FIELD
(
xpu_adaptive_seqlen
,
XpuAdaptiveSeqlen
,
bool
);
DECL_ARGUMENT_FIELD
(
xpu_device_id
,
XpuDeviceId
,
int
);
DECL_ARGUMENT_FIELD
(
use_nnadapter
,
UseNNAdapter
,
bool
);
DECL_ARGUMENT_FIELD
(
nnadapter_model_cache_dir
,
NNAdapterModelCacheDir
,
...
...
paddle/fluid/inference/analysis/ir_pass_manager.cc
浏览文件 @
f46311b0
...
...
@@ -202,6 +202,7 @@ void IRPassManager::CreatePasses(Argument *argument,
new
std
::
string
(
argument
->
xpu_autotune_file
()));
pass
->
Set
(
"precision"
,
new
std
::
string
(
argument
->
xpu_precision
()));
pass
->
Set
(
"adaptive_seqlen"
,
new
bool
(
argument
->
xpu_adaptive_seqlen
()));
pass
->
Set
(
"xpu_device_id"
,
new
int
(
argument
->
xpu_device_id
()));
// NNAdapter Related
pass
->
Set
(
"use_nnadapter"
,
new
bool
(
argument
->
use_nnadapter
()));
pass
->
Set
(
"nnadapter_model_cache_dir"
,
...
...
paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
浏览文件 @
f46311b0
...
...
@@ -243,6 +243,7 @@ void LiteSubgraphPass::SetUpEngine(
bool
use_gpu
=
Get
<
bool
>
(
"use_gpu"
);
bool
enable_int8
=
Get
<
bool
>
(
"enable_int8"
);
bool
use_xpu
=
Get
<
bool
>
(
"use_xpu"
);
int
xpu_device_id
=
Get
<
int
>
(
"xpu_device_id"
);
int
xpu_l3_workspace_size
=
Get
<
int
>
(
"xpu_l3_workspace_size"
);
int
cpu_math_library_num_threads
=
Get
<
int
>
(
"cpu_math_library_num_threads"
);
bool
locked
=
Get
<
bool
>
(
"locked"
);
...
...
@@ -305,6 +306,7 @@ void LiteSubgraphPass::SetUpEngine(
};
config
.
cpu_math_library_num_threads
=
cpu_math_library_num_threads
;
config
.
xpu_l3_workspace_size
=
xpu_l3_workspace_size
;
config
.
device_id
=
xpu_device_id
;
config
.
locked
=
locked
;
config
.
autotune
=
autotune
;
config
.
autotune_file
=
autotune_file
;
...
...
paddle/fluid/inference/api/analysis_predictor.cc
浏览文件 @
f46311b0
...
...
@@ -619,6 +619,7 @@ void AnalysisPredictor::PrepareArgument() {
argument_
.
SetXpuAutotuneFile
(
config_
.
xpu_autotune_file_
);
argument_
.
SetXpuPrecision
(
config_
.
xpu_precision_
);
argument_
.
SetXpuAdaptiveSeqlen
(
config_
.
xpu_adaptive_seqlen_
);
argument_
.
SetXpuDeviceId
(
config_
.
xpu_device_id_
);
// NNAdapter related
argument_
.
SetUseNNAdapter
(
config_
.
NNAdapter
().
use_nnadapter
);
argument_
.
SetNNAdapterDeviceNames
(
...
...
paddle/fluid/inference/lite/engine.cc
浏览文件 @
f46311b0
...
...
@@ -67,6 +67,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
lite_cxx_config
.
set_xpu_conv_autotune
(
cfg
.
autotune
,
cfg
.
autotune_file
);
lite_cxx_config
.
set_xpu_multi_encoder_method
(
cfg
.
precision
,
cfg
.
adaptive_seqlen
);
lite_cxx_config
.
set_xpu_dev_per_thread
(
cfg
.
device_id
);
#endif
#ifdef LITE_SUBGRAPH_WITH_NPU
...
...
paddle/fluid/inference/lite/engine.h
浏览文件 @
f46311b0
...
...
@@ -39,6 +39,9 @@ struct EngineConfig {
std
::
vector
<
std
::
string
>
neglected_passes
;
lite_api
::
LiteModelType
model_type
{
lite_api
::
LiteModelType
::
kProtobuf
};
bool
model_from_memory
{
true
};
// TODO(wilber): now only works for xpu, lite gpu can support device_id or
// not?
int
device_id
=
0
;
// for xpu
size_t
xpu_l3_workspace_size
;
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录