BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 606dfb13 (unverified)
Authored by Yu Yang on Sep 25, 2018; committed via GitHub on Sep 25, 2018.

Merge pull request #13442 from reyoung/feature/remove_trainer_api

Move trainer to contrib

Parents: cce056ca, e104e706
Showing 18 changed files with 1,544 additions and 1,415 deletions (+1544 / -1415):
paddle/fluid/API.spec  (+0, -13)
python/paddle/fluid/__init__.py  (+0, -9)
python/paddle/fluid/contrib/inferencer.py  (+112, -0)
python/paddle/fluid/contrib/trainer.py  (+1258, -0)
python/paddle/fluid/inferencer.py  (+2, -98)
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py  (+13, -3)
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py  (+15, -4)
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py  (+15, -4)
python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py  (+14, -4)
python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py  (+14, -3)
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py  (+16, -8)
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py  (+15, -7)
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py  (+13, -3)
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py  (+14, -4)
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py  (+14, -4)
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py  (+14, -4)
python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py  (+13, -3)
python/paddle/fluid/trainer.py  (+2, -1244)
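Every updated test below adopts the same compatibility pattern: prefer the relocated paddle.fluid.contrib modules and fall back to the old top-level imports, then drop the fluid. prefix at the call sites (fluid.Trainer becomes Trainer, fluid.Inferencer becomes Inferencer, fluid.EndStepEvent becomes EndStepEvent, and so on). A minimal standalone sketch of that pattern, lifted from the diffs that follow:

import sys

try:
    # New location introduced by this commit.
    from paddle.fluid.contrib.trainer import *
    from paddle.fluid.contrib.inferencer import *
except ImportError:
    # Older builds still expose the high-level API at the package top level.
    print(
        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
        file=sys.stderr)
    from paddle.fluid.trainer import *
    from paddle.fluid.inferencer import *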
paddle/fluid/API.spec @ 606dfb13

@@ -35,19 +35,6 @@ paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None,
 paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
 paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
-paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
-paddle.fluid.Trainer.save_inference_model ArgSpec(args=['self', 'param_path', 'feeded_var_names', 'target_var_indexes'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.Trainer.train ArgSpec(args=['self', 'num_epochs', 'event_handler', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=(None, None))
-paddle.fluid.BeginEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.EndEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.BeginStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.EndStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id', 'metrics'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.CheckpointConfig.__init__ ArgSpec(args=['self', 'checkpoint_dir', 'max_num_checkpoints', 'epoch_interval', 'step_interval'], varargs=None, keywords=None, defaults=(None, 3, 1, 10))
-paddle.fluid.Inferencer.__init__ ArgSpec(args=['self', 'infer_func', 'param_path', 'place', 'parallel'], varargs=None, keywords=None, defaults=(None, False))
-paddle.fluid.Inferencer.infer ArgSpec(args=['self', 'inputs', 'return_numpy'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
python/paddle/fluid/__init__.py @ 606dfb13

@@ -19,17 +19,8 @@ from .framework import *
 # import all class inside executor into fluid module
 from . import executor
 from .executor import *
-from . import trainer
-from .trainer import Trainer
-from .trainer import BeginEpochEvent
-from .trainer import EndEpochEvent
-from .trainer import BeginStepEvent
-from .trainer import EndStepEvent
-from .trainer import CheckpointConfig
-from . import inferencer
-from .inferencer import Inferencer
 from . import io
 from . import evaluator
python/paddle/fluid/contrib/inferencer.py  (new file, mode 100644) @ 606dfb13

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import contextlib

from .. import core
from .. import executor
from .. import framework
from .. import io
from .. import parallel_executor
from .. import unique_name
from .trainer import check_and_get_place

__all__ = ['Inferencer', ]


class Inferencer(object):
    """
    Inferencer High Level API.

    Args:
        infer_func (Python func): Infer function that will return predict Variable
        param_path (str): The path where the inference model is saved by fluid.io.save_params
        place (Place): place to do the inference
        parallel (bool): use parallel_executor to run the inference, it will use multi CPU/GPU.

    Examples:
        .. code-block:: python

            def inference_program():
                x = fluid.layers.data(name='x', shape=[13], dtype='float32')
                y_predict = fluid.layers.fc(input=x, size=1, act=None)
                return y_predict

            place = fluid.CPUPlace()
            inferencer = fluid.Inferencer(
                infer_func=inference_program, param_path="/tmp/model", place=place)
    """

    def __init__(self, infer_func, param_path, place=None, parallel=False):
        self.param_path = param_path
        self.scope = core.Scope()
        self.parallel = parallel
        self.place = check_and_get_place(place)

        self.inference_program = framework.Program()
        with framework.program_guard(self.inference_program):
            with unique_name.guard():
                self.predict_var = infer_func()

        with self._prog_and_scope_guard():
            # load params from param_path into scope
            io.load_params(executor.Executor(self.place), param_path)

        if parallel:
            with self._prog_and_scope_guard():
                self.exe = parallel_executor.ParallelExecutor(
                    use_cuda=isinstance(self.place, core.CUDAPlace),
                    loss_name=self.predict_var.name)
        else:
            self.exe = executor.Executor(self.place)

        self.inference_program = self.inference_program.clone(for_test=True)

    def infer(self, inputs, return_numpy=True):
        """
        Do Inference for Inputs

        Args:
            inputs (map): a map of {"input_name": input_var} that will be feed into the inference program
            return_numpy (bool): transform return value into numpy or not

        Returns:
            Tensor or Numpy: the predict value of the inference model for the inputs

        Examples:
            .. code-block:: python

                tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
                results = inferencer.infer({'x': tensor_x})
        """
        if not isinstance(inputs, dict):
            raise ValueError(
                "inputs should be a map of {'input_name': input_var}")

        with self._prog_and_scope_guard():
            results = self.exe.run(feed=inputs,
                                   fetch_list=[self.predict_var.name],
                                   return_numpy=return_numpy)

        return results

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        with framework.program_guard(main_program=self.inference_program):
            with executor.scope_guard(self.scope):
                yield
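Putting the docstring pieces above together, a minimal usage sketch of the relocated class (the model path, the feature width of 13, and the batch size are illustrative values taken from the docstring examples, not from this commit's tests):

import numpy
import paddle.fluid as fluid
from paddle.fluid.contrib.inferencer import Inferencer

def inference_program():
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    return y_predict

place = fluid.CPUPlace()
# param_path must point at parameters previously saved with fluid.io.save_params.
inferencer = Inferencer(
    infer_func=inference_program, param_path="/tmp/model", place=place)

batch_size = 10
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})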
python/paddle/fluid/contrib/trainer.py  (new file, mode 100644) @ 606dfb13
This diff is collapsed (1,258 added lines).
python/paddle/fluid/inferencer.py @ 606dfb13

@@ -12,101 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
(The removed lines are the complete Inferencer implementation: the same code now added in python/paddle/fluid/contrib/inferencer.py above, except that its imports were written as `from . import ...` rather than `from .. import ...`. Only a stub remains:)
+# NOTE: inferencer is moved into fluid.contrib.inferencer.
+__all__ = []
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import contextlib
 import numpy
 import unittest

@@ -57,11 +67,11 @@ def optimizer_func():
 def train(use_cuda, train_program, params_dirname, inference_model_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program, place=place, optimizer_func=optimizer_func)

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             if event.step == 10:
                 test_metrics = trainer.test(
                     reader=test_reader, feed_order=['x', 'y'])

@@ -91,7 +101,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
         return

     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program, param_path=params_dirname, place=place)

     batch_size = 10
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py @ 606dfb13

@@ -14,11 +14,22 @@
 from __future__ import print_function
+import sys
 import paddle
 import paddle.fluid as fluid
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.core as core
 import numpy
 import six
 import os
 import cifar10_small_test_set

@@ -106,7 +117,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
         paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False)

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             avg_cost, accuracy = trainer.test(
                 reader=test_reader, feed_order=['pixel', 'label'])

@@ -118,7 +129,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
         return

     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program,
         optimizer_func=optimizer_func,
         place=place,

@@ -133,7 +144,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
 def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program,
         param_path=params_dirname,
         place=place,
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py @ 606dfb13

@@ -14,11 +14,22 @@
 from __future__ import print_function
+import sys
 import paddle
 import paddle.fluid as fluid
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.core as core
 import numpy
 import six
 import os
 import cifar10_small_test_set

@@ -83,7 +94,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
         paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False)

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             avg_cost, accuracy = trainer.test(
                 reader=test_reader, feed_order=['pixel', 'label'])

@@ -95,7 +106,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
         return

     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program,
         place=place,
         optimizer_func=optimizer_func,

@@ -110,7 +121,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
 def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program,
         param_path=params_dirname,
         place=place,
python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import numpy as np

 WORD_DICT, VERB_DICT, LABEL_DICT = paddle.dataset.conll05.get_dict()

@@ -149,7 +159,7 @@ def optimize_func():
 def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program, place=place, optimizer_func=optimize_func)

     feed_order = [

@@ -164,7 +174,7 @@ def train(use_cuda, train_program, params_dirname):
     #     place)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.conll05.test(), batch_size=BATCH_SIZE)
             avg_cost_set = trainer.test(

@@ -184,7 +194,7 @@ def train(use_cuda, train_program, params_dirname):
             if math.isnan(float(avg_cost)):
                 sys.exit("got NaN loss, training failed.")
-        elif isinstance(event, fluid.EndStepEvent):
+        elif isinstance(event, EndStepEvent):
             print("Step {0}, Epoch {1} Metrics {2}".format(
                 event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI

@@ -204,7 +214,7 @@ def train(use_cuda, train_program, params_dirname):
 def infer(use_cuda, inference_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         inference_program, param_path=params_dirname, place=place)

     # Setup input by creating LoDTensor to represent sequence of words.
python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py @ 606dfb13

@@ -13,17 +13,28 @@
 # limitations under the License.
 from __future__ import print_function

 import contextlib
 import sys

 import numpy as np
 import paddle
 import paddle.fluid as fluid
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.framework as framework
 import paddle.fluid.layers as pd
 from paddle.fluid.executor import Executor
 from functools import partial
 import unittest
 import os

 dict_size = 30000
 source_dict_dim = target_dict_dim = dict_size

@@ -198,12 +209,12 @@ def train(use_cuda, is_sparse, is_local=True):
     ]

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
             if event.step == 10:
                 trainer.stop()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=partial(train_program, is_sparse),
         place=place,
         optimizer_func=optimizer_func)
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py @ 606dfb13

@@ -14,14 +14,22 @@
 from __future__ import print_function

 import argparse
 import sys
 import paddle.fluid as fluid
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.core as core
 import paddle
 import six
 import sys
 import numpy
 import unittest
 import math
 import sys
 import os

@@ -68,14 +76,14 @@ def optimizer_func():
 def train(use_cuda, train_program, parallel, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program,
         place=place,
         optimizer_func=optimizer_func,
         parallel=parallel)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
             avg_cost, acc = trainer.test(

@@ -91,7 +99,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
                 event.epoch + 1, avg_cost, acc))
             if math.isnan(avg_cost):
                 sys.exit("got NaN loss, training failed.")
-        elif isinstance(event, fluid.EndStepEvent):
+        elif isinstance(event, EndStepEvent):
             print(("Step {0}, Epoch {1} Metrics {2}".format(
                 event.step, event.epoch,

@@ -112,7 +120,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
 def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program,
         param_path=params_dirname,
         place=place,
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py @ 606dfb13

@@ -14,14 +14,22 @@
 from __future__ import print_function

 import argparse
 import sys
 import paddle.fluid as fluid
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.core as core
 import paddle
 import six
 import sys
 import numpy
 import unittest
 import math
 import sys
 import os

@@ -55,14 +63,14 @@ def optimizer_func():
 def train(use_cuda, train_program, params_dirname, parallel):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program,
         place=place,
         optimizer_func=optimizer_func,
         parallel=parallel)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
             avg_cost, acc = trainer.test(

@@ -94,7 +102,7 @@ def train(use_cuda, train_program, params_dirname, parallel):
 def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program,
         param_path=params_dirname,
         place=place,
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py @ 606dfb13

@@ -19,6 +19,16 @@ import sys
 import numpy as np
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import paddle.fluid.layers as layers
 import paddle.fluid.nets as nets

@@ -164,7 +174,7 @@ def optimizer_func():
 def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program, place=place, optimizer_func=optimizer_func)

     feed_order = [

@@ -173,7 +183,7 @@ def train(use_cuda, train_program, params_dirname):
     ]

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             test_reader = paddle.batch(
                 paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
             avg_cost_set = trainer.test(

@@ -208,7 +218,7 @@ def train(use_cuda, train_program, params_dirname):
 def infer(use_cuda, inference_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         inference_program, param_path=params_dirname, place=place)

     # Use the first data from paddle.dataset.movielens.test() as input.
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 from functools import partial
 import numpy as np

@@ -72,13 +82,13 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=partial(train_program, word_dict),
         place=place,
         optimizer_func=optimizer_func)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
             avg_cost, acc = trainer.test(

@@ -96,7 +106,7 @@ def train(use_cuda, train_program, params_dirname):
                 event.epoch + 1, avg_cost, acc))
             if math.isnan(avg_cost):
                 sys.exit("got NaN loss, training failed.")
-        elif isinstance(event, fluid.EndStepEvent):
+        elif isinstance(event, EndStepEvent):
             print("Step {0}, Epoch {1} Metrics {2}".format(
                 event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI

@@ -119,7 +129,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=partial(inference_program, word_dict),
         param_path=params_dirname,
         place=place)
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 from functools import partial
 import numpy as np

@@ -87,13 +97,13 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=partial(train_program, word_dict),
         place=place,
         optimizer_func=optimizer_func)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
             avg_cost, acc = trainer.test(

@@ -111,7 +121,7 @@ def train(use_cuda, train_program, params_dirname):
                 event.epoch + 1, avg_cost, acc))
             if math.isnan(avg_cost):
                 sys.exit("got NaN loss, training failed.")
-        elif isinstance(event, fluid.EndStepEvent):
+        elif isinstance(event, EndStepEvent):
             print("Step {0}, Epoch {1} Metrics {2}".format(
                 event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI

@@ -134,7 +144,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=partial(inference_program, word_dict),
         param_path=params_dirname,
         place=place)
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 from functools import partial
 import numpy as np

@@ -79,13 +89,13 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=partial(train_program, word_dict),
         place=place,
         optimizer_func=optimizer_func)

     def event_handler(event):
-        if isinstance(event, fluid.EndEpochEvent):
+        if isinstance(event, EndEpochEvent):
             test_reader = paddle.batch(
                 paddle.dataset.imdb.test(word_dict),
                 batch_size=BATCH_SIZE,

@@ -105,7 +115,7 @@ def train(use_cuda, train_program, params_dirname):
                 event.epoch + 1, avg_cost, acc))
             if math.isnan(avg_cost):
                 sys.exit("got NaN loss, training failed.")
-        elif isinstance(event, fluid.EndStepEvent):
+        elif isinstance(event, EndStepEvent):
             print("Step {0}, Epoch {1} Metrics {2}".format(
                 event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI

@@ -129,7 +139,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     word_dict = paddle.dataset.imdb.word_dict()

-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=partial(inference_program, word_dict),
         param_path=params_dirname,
         place=place)
python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py @ 606dfb13

@@ -16,6 +16,16 @@ from __future__ import print_function
 import paddle
 import paddle.fluid as fluid
+import sys
+try:
+    from paddle.fluid.contrib.trainer import *
+    from paddle.fluid.contrib.inferencer import *
+except ImportError:
+    print(
+        "In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
+        file=sys.stderr)
+    from paddle.fluid.trainer import *
+    from paddle.fluid.inferencer import *
 import numpy as np
 import math
 import sys

@@ -95,7 +105,7 @@ def train(use_cuda, train_program, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

     def event_handler(event):
-        if isinstance(event, fluid.EndStepEvent):
+        if isinstance(event, EndStepEvent):
             outs = trainer.test(
                 reader=test_reader,
                 feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw'])

@@ -109,7 +119,7 @@ def train(use_cuda, train_program, params_dirname):
             if math.isnan(avg_cost):
                 sys.exit("got NaN loss, training failed.")

-    trainer = fluid.Trainer(
+    trainer = Trainer(
         train_func=train_program, optimizer_func=optimizer_func, place=place)

     trainer.train(

@@ -121,7 +131,7 @@ def train(use_cuda, train_program, params_dirname):
 def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(
+    inferencer = Inferencer(
         infer_func=inference_program, param_path=params_dirname, place=place)

     # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
python/paddle/fluid/trainer.py @ 606dfb13
This diff is collapsed (2 additions, 1,244 deletions).
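For downstream code, the practical effect of this commit is that the high-level Trainer/Inferencer API is reached through paddle.fluid.contrib rather than as attributes of the fluid module. A hedged before/after sketch in the spirit of the fit_a_line test above (the program, optimizer, and reader are illustrative stand-ins; only the import path and the dropped fluid. prefix come from this commit):

import paddle
import paddle.fluid as fluid
from paddle.fluid.contrib.trainer import Trainer, EndStepEvent  # was: fluid.Trainer, fluid.EndStepEvent

def train_program():
    # Linear regression on 13 features, as in the fit_a_line example (illustrative).
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    return fluid.layers.mean(
        fluid.layers.square_error_cost(input=y_predict, label=y))

def optimizer_func():
    return fluid.optimizer.SGD(learning_rate=0.001)

def event_handler(event):
    if isinstance(event, EndStepEvent):      # was: fluid.EndStepEvent
        print("finished step %d" % event.step)

place = fluid.CPUPlace()
trainer = Trainer(                           # was: fluid.Trainer(
    train_func=train_program, place=place, optimizer_func=optimizer_func)
trainer.train(
    num_epochs=1,
    event_handler=event_handler,
    reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=20),
    feed_order=['x', 'y'])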