Commit 54ae8e45 (unverified)
Authored May 18, 2018 by Qiao Longfei; committed via GitHub on May 18, 2018

Merge pull request #10741 from jacquesqiao/inferencer-support-multi-gpu

Inferencer support parallel_executor

Parents: 67b8a300, d2d671e3
Showing 6 changed files with 42 additions and 28 deletions (+42, -28)
python/paddle/fluid/inferencer.py  +27 -11
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py  +1 -1
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py  +1 -1
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py  +1 -1
python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py  +6 -8
python/paddle/fluid/trainer.py  +6 -6
python/paddle/fluid/inferencer.py

@@ -12,11 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import contextlib
+
 import core
 import executor
 import framework
 import io
+import parallel_executor
 import unique_name
 from trainer import check_and_get_place
 
@@ -24,40 +27,53 @@ __all__ = ['Inferencer', ]
 
 class Inferencer(object):
-    def __init__(self, infer_func, param_path, place=None):
+    def __init__(self, infer_func, param_path, place=None, parallel=False):
         """
         :param infer_func: a function that will return predict Variable
         :param param_path: the path where the inference model is saved by fluid.io.save_params
         :param place: place to do the inference
+        :param parallel: use parallel_executor to run the inference, it will use multi CPU/GPU.
         """
         self.param_path = param_path
         self.scope = core.Scope()
+        self.parallel = parallel
+        self.place = check_and_get_place(place)
 
         self.inference_program = framework.Program()
         with framework.program_guard(self.inference_program):
             with unique_name.guard():
                 self.predict_var = infer_func()
 
-        self.exe = executor.Executor(check_and_get_place(place))
-        with executor.scope_guard(self.scope):
+        with self._prog_and_scope_guard():
             # load params from param_path into scope
-            io.load_params(self.exe, param_path, self.inference_program)
+            io.load_params(executor.Executor(self.place), param_path)
+
+        if parallel:
+            with self._prog_and_scope_guard():
+                self.exe = parallel_executor.ParallelExecutor(
+                    use_cuda=isinstance(self.place, core.CUDAPlace),
+                    loss_name=self.predict_var.name)
+        else:
+            self.exe = executor.Executor(self.place)
 
-    def infer(self, inputs, return_numpy=True):
+    def infer(self, inputs):
         """
         :param inputs: a map of {"input_name": input_var} that will be feed into the inference program
             to get the predict value
-        :param return_numpy: if return numpy value for row tensor
        :return: the predict value of the inference model
         """
         if not isinstance(inputs, dict):
             raise ValueError(
                 "inputs should be a map of {'input_name': input_var}")
 
-        with executor.scope_guard(self.scope):
-            results = self.exe.run(self.inference_program,
-                                   feed=inputs,
-                                   fetch_list=[self.predict_var],
-                                   return_numpy=return_numpy)
+        with self._prog_and_scope_guard():
+            results = self.exe.run(feed=inputs,
+                                   fetch_list=[self.predict_var.name])
 
         return results
+
+    @contextlib.contextmanager
+    def _prog_and_scope_guard(self):
+        with framework.program_guard(main_program=self.inference_program):
+            with executor.scope_guard(self.scope):
+                yield
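To put the new flag in context, here is a minimal usage sketch written in the style of the fit_a_line test touched by this commit; the network function, parameter directory, place, and batch shape are illustrative assumptions rather than part of the change. With parallel=True the Inferencer builds a ParallelExecutor and runs the same infer() call across all available GPUs (or CPU cores); with the default parallel=False it keeps the original single-device Executor.

import numpy
import paddle.fluid as fluid

def inference_program():
    # Illustrative infer_func: builds and returns the predict Variable,
    # mirroring the fit_a_line high-level-api test.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    return y_predict

# "params_dir" is a hypothetical directory written earlier by fluid.io.save_params.
inferencer = fluid.Inferencer(
    infer_func=inference_program,
    param_path="params_dir",
    place=fluid.CUDAPlace(0),  # or fluid.CPUPlace()
    parallel=True)             # new in this PR; False keeps the single-device Executor

tensor_x = numpy.random.uniform(0, 10, [10, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})
print("infer results: ", numpy.array(results[0]))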
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py

@@ -94,7 +94,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
     tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
 
     results = inferencer.infer({'x': tensor_x})
-    print("infer results: ", results[0])
+    print("infer results: ", numpy.array(results[0]))
 
 
 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py

@@ -112,7 +112,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
 
     results = inferencer.infer({'img': tensor_img})
 
-    print("infer results: ", results[0])
+    print("infer results: ", numpy.array(results[0]))
 
 
 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py

@@ -93,7 +93,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
 
     results = inferencer.infer({'img': tensor_img})
 
-    print("infer results: ", results[0])
+    print("infer results: ", numpy.array(results[0]))
 
 
 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py

@@ -127,14 +127,12 @@ def infer(use_cuda, inference_program, save_path):
     third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
     fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
 
-    result = inferencer.infer(
-        {
-            'firstw': first_word,
-            'secondw': second_word,
-            'thirdw': third_word,
-            'forthw': fourth_word
-        },
-        return_numpy=False)
+    result = inferencer.infer({
+        'firstw': first_word,
+        'secondw': second_word,
+        'thirdw': third_word,
+        'forthw': fourth_word
+    })
     print(np.array(result[0]))
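A note on the numpy.array(...) wrapping that now appears in these tests: with return_numpy removed from infer(), the fetched results may come back as LoDTensors rather than numpy arrays (the ParallelExecutor path in particular returns fetched tensors), and numpy.array(...) converts either form. A tiny standalone sketch of that conversion, with arbitrary place, shape, and values:

import numpy
import paddle.fluid as fluid

place = fluid.CPUPlace()
t = fluid.LoDTensor()  # the tensor type a fetch can return
t.set(numpy.arange(6, dtype='float32').reshape(2, 3), place)
print(numpy.array(t))  # same numpy.array(...) conversion used in the updated tests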
python/paddle/fluid/trainer.py

@@ -12,18 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
 import os
 
 import core
 
-import framework
-import executor
 import data_feeder
-import contextlib
+import executor
+import framework
 import io
-import unique_name
-import parallel_executor
 # optimizer is same as the parameter of Trainer.__init__. Rename it to opt_module
 import optimizer as opt_module
+import parallel_executor
 from transpiler import distribute_transpiler
 
 __all__ = [