Unverified commit 606dfb13, authored by Yu Yang, committed by GitHub

Merge pull request #13442 from reyoung/feature/remove_trainer_api

Move trainer to contrib
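This PR moves Trainer, Inferencer, and the related event/config classes out of the top-level paddle.fluid namespace into paddle.fluid.contrib. The updated book tests below keep working on both layouts by trying the new import path first and falling back to the old one. A minimal sketch of that fallback pattern, following the imports used in this diff (the warning text here is paraphrased, not the exact string from the commit):

import sys

try:
    # new location after this PR
    from paddle.fluid.contrib.trainer import *
    from paddle.fluid.contrib.inferencer import *
except ImportError:
    # older fluid releases still expose the top-level modules
    print("trainer/inferencer not found under paddle.fluid.contrib, "
          "falling back to paddle.fluid", file=sys.stderr)
    from paddle.fluid.trainer import *
    from paddle.fluid.inferencer import *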
@@ -35,19 +35,6 @@ paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None,
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
paddle.fluid.Trainer.save_inference_model ArgSpec(args=['self', 'param_path', 'feeded_var_names', 'target_var_indexes'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.train ArgSpec(args=['self', 'num_epochs', 'event_handler', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.BeginEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None)
paddle.fluid.EndEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None)
paddle.fluid.BeginStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id'], varargs=None, keywords=None, defaults=None)
paddle.fluid.EndStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id', 'metrics'], varargs=None, keywords=None, defaults=None)
paddle.fluid.CheckpointConfig.__init__ ArgSpec(args=['self', 'checkpoint_dir', 'max_num_checkpoints', 'epoch_interval', 'step_interval'], varargs=None, keywords=None, defaults=(None, 3, 1, 10))
paddle.fluid.Inferencer.__init__ ArgSpec(args=['self', 'infer_func', 'param_path', 'place', 'parallel'], varargs=None, keywords=None, defaults=(None, False))
paddle.fluid.Inferencer.infer ArgSpec(args=['self', 'inputs', 'return_numpy'], varargs=None, keywords=None, defaults=(True,))
paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
...
@@ -19,17 +19,8 @@ from .framework import *
# import all class inside executor into fluid module
from . import executor
from .executor import *
from . import trainer
from .trainer import Trainer
from .trainer import BeginEpochEvent
from .trainer import EndEpochEvent
from .trainer import BeginStepEvent
from .trainer import EndStepEvent
from .trainer import CheckpointConfig
from . import inferencer
from .inferencer import Inferencer
from . import io
from . import evaluator
...
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
from .. import core
from .. import executor
from .. import framework
from .. import io
from .. import parallel_executor
from .. import unique_name
from .trainer import check_and_get_place
__all__ = ['Inferencer', ]
class Inferencer(object):
"""
Inferencer High Level API.
Args:
infer_func (Python func): Infer function that will return the predict Variable
param_path (str): The path where the inference model is saved by fluid.io.save_params
place (Place): the place to do the inference
parallel (bool): whether to use parallel_executor to run the inference; if True, it will use multiple CPUs/GPUs.
Examples:
.. code-block:: python
def inference_program():
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
return y_predict
place = fluid.CPUPlace()
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path="/tmp/model", place=place)
"""
def __init__(self, infer_func, param_path, place=None, parallel=False):
self.param_path = param_path
self.scope = core.Scope()
self.parallel = parallel
self.place = check_and_get_place(place)
self.inference_program = framework.Program()
with framework.program_guard(self.inference_program):
with unique_name.guard():
self.predict_var = infer_func()
with self._prog_and_scope_guard():
# load params from param_path into scope
io.load_params(executor.Executor(self.place), param_path)
if parallel:
with self._prog_and_scope_guard():
self.exe = parallel_executor.ParallelExecutor(
use_cuda=isinstance(self.place, core.CUDAPlace),
loss_name=self.predict_var.name)
else:
self.exe = executor.Executor(self.place)
self.inference_program = self.inference_program.clone(for_test=True)
def infer(self, inputs, return_numpy=True):
"""
Do inference for the given inputs.
Args:
inputs (map): a map of {"input_name": input_var} that will be fed into the inference program
return_numpy (bool): whether to transform the return value into numpy arrays
Returns:
Tensor or Numpy: the predicted values of the inference model for the inputs
Examples:
.. code-block:: python
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})
"""
if not isinstance(inputs, dict):
raise ValueError(
"inputs should be a map of {'input_name': input_var}")
with self._prog_and_scope_guard():
results = self.exe.run(feed=inputs,
fetch_list=[self.predict_var.name],
return_numpy=return_numpy)
return results
@contextlib.contextmanager
def _prog_and_scope_guard(self):
with framework.program_guard(main_program=self.inference_program):
with executor.scope_guard(self.scope):
yield
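Combining the two docstring examples above into one end-to-end run, a minimal sketch of using the moved class (the [13]-wide input, batch_size=10, and the "/tmp/model" param_path are the illustrative values from the docstrings, not fixed requirements):

import numpy
import paddle.fluid as fluid
from paddle.fluid.contrib.inferencer import Inferencer

def inference_program():
    # the same network definition that was used when saving the parameters
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    return y_predict

place = fluid.CPUPlace()
inferencer = Inferencer(
    infer_func=inference_program, param_path="/tmp/model", place=place)

batch_size = 10
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})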
This diff is collapsed.
@@ -12,101 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# NOTE: inferencer is moved into fluid.contrib.inferencer.
__all__ = []
import contextlib
from . import core
from . import executor
from . import framework
from . import io
from . import parallel_executor
from . import unique_name
from .trainer import check_and_get_place
__all__ = ['Inferencer', ]
class Inferencer(object):
"""
Inferencer High Level API.
Args:
infer_func (Python func): Infer function that will return the predict Variable
param_path (str): The path where the inference model is saved by fluid.io.save_params
place (Place): the place to do the inference
parallel (bool): whether to use parallel_executor to run the inference; if True, it will use multiple CPUs/GPUs.
Examples:
.. code-block:: python
def inference_program():
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
return y_predict
place = fluid.CPUPlace()
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path="/tmp/model", place=place)
"""
def __init__(self, infer_func, param_path, place=None, parallel=False):
self.param_path = param_path
self.scope = core.Scope()
self.parallel = parallel
self.place = check_and_get_place(place)
self.inference_program = framework.Program()
with framework.program_guard(self.inference_program):
with unique_name.guard():
self.predict_var = infer_func()
with self._prog_and_scope_guard():
# load params from param_path into scope
io.load_params(executor.Executor(self.place), param_path)
if parallel:
with self._prog_and_scope_guard():
self.exe = parallel_executor.ParallelExecutor(
use_cuda=isinstance(self.place, core.CUDAPlace),
loss_name=self.predict_var.name)
else:
self.exe = executor.Executor(self.place)
self.inference_program = self.inference_program.clone(for_test=True)
def infer(self, inputs, return_numpy=True):
"""
Do inference for the given inputs.
Args:
inputs (map): a map of {"input_name": input_var} that will be fed into the inference program
return_numpy (bool): whether to transform the return value into numpy arrays
Returns:
Tensor or Numpy: the predicted values of the inference model for the inputs
Examples:
.. code-block:: python
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})
"""
if not isinstance(inputs, dict):
raise ValueError(
"inputs should be a map of {'input_name': input_var}")
with self._prog_and_scope_guard():
results = self.exe.run(feed=inputs,
fetch_list=[self.predict_var.name],
return_numpy=return_numpy)
return results
@contextlib.contextmanager
def _prog_and_scope_guard(self):
with framework.program_guard(main_program=self.inference_program):
with executor.scope_guard(self.scope):
yield
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import contextlib
import numpy
import unittest
@@ -57,11 +67,11 @@ def optimizer_func():
def train(use_cuda, train_program, params_dirname, inference_model_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step == 10:
test_metrics = trainer.test(
reader=test_reader, feed_order=['x', 'y'])
@@ -91,7 +101,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
...
@@ -14,11 +14,22 @@
from __future__ import print_function
import sys
import paddle
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.core as core
import numpy
import six
import os
import cifar10_small_test_set
@@ -106,7 +117,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
@@ -118,7 +129,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place,
@@ -133,7 +144,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
def infer(use_cuda, inference_program, parallel, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program,
param_path=params_dirname,
place=place,
...
@@ -14,11 +14,22 @@
from __future__ import print_function
import sys
import paddle
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.core as core
import numpy
import six
import os
import cifar10_small_test_set
@@ -83,7 +94,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
@@ -95,7 +106,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_func,
@@ -110,7 +121,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
def infer(use_cuda, inference_program, parallel, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program,
param_path=params_dirname,
place=place,
...
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import numpy as np
WORD_DICT, VERB_DICT, LABEL_DICT = paddle.dataset.conll05.get_dict()
@@ -149,7 +159,7 @@ def optimize_func():
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimize_func)
feed_order = [
@@ -164,7 +174,7 @@ def train(use_cuda, train_program, params_dirname):
# place)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.conll05.test(), batch_size=BATCH_SIZE)
avg_cost_set = trainer.test(
@@ -184,7 +194,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(float(avg_cost)):
sys.exit("got NaN loss, training failed.")
elif isinstance(event, fluid.EndStepEvent):
elif isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
if event.step == 1: # Run 2 iterations to speed CI
@@ -204,7 +214,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
# Setup input by creating LoDTensor to represent sequence of words.
...
@@ -13,17 +13,28 @@
# limitations under the License.
from __future__ import print_function
import contextlib
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.framework as framework
import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
from functools import partial
import unittest
import os
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
@@ -198,12 +209,12 @@ def train(use_cuda, is_sparse, is_local=True):
]
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
if event.step == 10:
trainer.stop()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, is_sparse),
place=place,
optimizer_func=optimizer_func)
...
@@ -14,14 +14,22 @@
from __future__ import print_function
import argparse
import sys
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.core as core
import paddle
import six
import sys
import numpy
import unittest
import math
import sys
import os
@@ -68,14 +76,14 @@ def optimizer_func():
def train(use_cuda, train_program, parallel, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_func,
parallel=parallel)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
@@ -91,7 +99,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
elif isinstance(event, fluid.EndStepEvent):
elif isinstance(event, EndStepEvent):
print(
("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch,
@@ -112,7 +120,7 @@ def train(use_cuda, train_program, parallel, params_dirname):
def infer(use_cuda, inference_program, parallel, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program,
param_path=params_dirname,
place=place,
...
@@ -14,14 +14,22 @@
from __future__ import print_function
import argparse
import sys
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.core as core
import paddle
import six
import sys
import numpy
import unittest
import math
import sys
import os
@@ -55,14 +63,14 @@ def optimizer_func():
def train(use_cuda, train_program, params_dirname, parallel):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_func,
parallel=parallel)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
@@ -94,7 +102,7 @@ def train(use_cuda, train_program, params_dirname, parallel):
def infer(use_cuda, inference_program, parallel, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program,
param_path=params_dirname,
place=place,
...
@@ -19,6 +19,16 @@ import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
@@ -164,7 +174,7 @@ def optimizer_func():
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
feed_order = [
@@ -173,7 +183,7 @@ def train(use_cuda, train_program, params_dirname):
]
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
avg_cost_set = trainer.test(
@@ -208,7 +218,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
# Use the first data from paddle.dataset.movielens.test() as input.
...
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
from functools import partial
import numpy as np
@@ -72,13 +82,13 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
@@ -96,7 +106,7 @@ def train(use_cuda, train_program, params_dirname):
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
elif isinstance(event, fluid.EndStepEvent):
elif isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
if event.step == 1: # Run 2 iterations to speed CI
@@ -119,7 +129,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
...
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
from functools import partial
import numpy as np
@@ -87,13 +97,13 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
avg_cost, acc = trainer.test(
@@ -111,7 +121,7 @@ def train(use_cuda, train_program, params_dirname):
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
elif isinstance(event, fluid.EndStepEvent):
elif isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
if event.step == 1: # Run 2 iterations to speed CI
@@ -134,7 +144,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
...
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
from functools import partial
import numpy as np
@@ -79,13 +89,13 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
def event_handler(event):
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict),
batch_size=BATCH_SIZE,
@@ -105,7 +115,7 @@ def train(use_cuda, train_program, params_dirname):
event.epoch + 1, avg_cost, acc))
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
elif isinstance(event, fluid.EndStepEvent):
elif isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
if event.step == 1: # Run 2 iterations to speed CI
@@ -129,7 +139,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
...
@@ -16,6 +16,16 @@ from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import numpy as np
import math
import sys
@@ -95,7 +105,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw'])
@@ -109,7 +119,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, optimizer_func=optimizer_func, place=place)
trainer.train(
@@ -121,7 +131,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
...
This diff is collapsed.