Unverified commit da161514, authored by Yu Yang, committed by GitHub

Merge pull request #620 from reyoung/feature/move_trainer_to_contrib

Move trainer to contrib
......@@ -164,7 +164,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer reads in a training program and some other required parameters:
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -194,7 +194,7 @@ step = 0
# event_handler prints training and testing info
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
plot_cost.append(train_title, step, event.metrics[0])
......@@ -210,7 +210,7 @@ def event_handler_plot(event):
trainer.stop()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for inference later
if params_dirname is not None:
......@@ -254,7 +254,7 @@ def inference_program():
The inferencer loads the trained model from `params_dirname` and uses it to make predictions on data it has never seen.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -180,7 +180,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer will take the `train_program` as input.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -213,7 +213,7 @@ step = 0
# event_handler prints training and testing info
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
plot_cost.append(train_title, step, event.metrics[0])
......@@ -229,7 +229,7 @@ def event_handler_plot(event):
trainer.stop()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for inference later
if params_dirname is not None:
......@@ -276,7 +276,7 @@ def inference_program():
The Inferencer will load the trained model from `params_dirname` and use it to run inference on unseen data.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -206,7 +206,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer reads in a training program and some other required parameters:
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -236,7 +236,7 @@ step = 0
# event_handler prints training and testing info
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
plot_cost.append(train_title, step, event.metrics[0])
......@@ -252,7 +252,7 @@ def event_handler_plot(event):
trainer.stop()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for inference later
if params_dirname is not None:
......@@ -296,7 +296,7 @@ def inference_program():
The inferencer loads the trained model from `params_dirname` and uses it to make predictions on data it has never seen.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -222,7 +222,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer will take the `train_program` as input.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -255,7 +255,7 @@ step = 0
# event_handler prints training and testing info
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
plot_cost.append(train_title, step, event.metrics[0])
......@@ -271,7 +271,7 @@ def event_handler_plot(event):
trainer.stop()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for inference later
if params_dirname is not None:
......@@ -318,7 +318,7 @@ def inference_program():
The Inferencer will load the trained model from `params_dirname` and use it to run inference on unseen data.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -15,6 +15,18 @@
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import numpy
BATCH_SIZE = 20
......@@ -49,7 +61,7 @@ def optimizer_program():
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
feed_order = ['x', 'y']
......@@ -59,6 +71,7 @@ params_dirname = "fit_a_line.inference.model"
# Plot data
from paddle.v2.plot import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
......@@ -69,7 +82,7 @@ step = 0
# event_handler prints training and testing info
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
plot_cost.append(train_title, step, event.metrics[0])
......@@ -85,7 +98,7 @@ def event_handler_plot(event):
trainer.stop()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for inference later
if params_dirname is not None:
......@@ -106,7 +119,7 @@ def inference_program():
return y_predict
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
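The hunks above stop short of showing how the relocated classes are actually driven. As a reading aid (not part of this commit), here is a minimal sketch of the fit_a_line flow with the contrib imports; `train_program`, `optimizer_program`, `inference_program`, `train_reader`, and `params_dirname` are the objects the chapter defines earlier, and the `trainer.train(...)` keyword names and the feed-dict form of `inferencer.infer(...)` are assumed from the pre-1.0 book examples rather than taken from this diff.

```python
import numpy
import paddle.fluid as fluid

try:
    # fluid >= 1.0: the high-level API lives under contrib
    from paddle.fluid.contrib.trainer import Trainer, EndEpochEvent
    from paddle.fluid.contrib.inferencer import Inferencer
except ImportError:
    # older fluid releases keep the original locations
    from paddle.fluid.trainer import Trainer, EndEpochEvent
    from paddle.fluid.inferencer import Inferencer

place = fluid.CPUPlace()
trainer = Trainer(
    train_func=train_program,          # defined earlier in the chapter
    place=place,
    optimizer_func=optimizer_program)  # defined earlier in the chapter

def event_handler(event):
    # persist the parameters at the end of every epoch
    if isinstance(event, EndEpochEvent):
        trainer.save_params(params_dirname)

trainer.train(
    reader=train_reader,               # paddle.batch(...) over the train set
    num_epochs=100,
    event_handler=event_handler,
    feed_order=['x', 'y'])

inferencer = Inferencer(
    infer_func=inference_program, param_path=params_dirname, place=place)

# 10 synthetic samples with the 13 Boston-housing features, as in the chapter
tensor_x = numpy.random.uniform(0, 10, [10, 13]).astype("float32")
results = inferencer.infer({'x': tensor_x})    # assumed feed-dict interface
print("inferred prices:", results[0])
```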
......@@ -274,7 +274,7 @@ test_reader = paddle.batch(
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -290,14 +290,14 @@ The Fluid API provides a hook for callback functions during training. Users can
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -328,14 +328,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -380,7 +380,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
## Apply the Model
The trained model can be used to classify images of handwritten digits. The program below shows how to run inference through the `fluid.Inferencer` interface.
The trained model can be used to classify images of handwritten digits. The program below shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface.
### Inference Configuration
......@@ -388,7 +388,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
We can simply plug in the classifier defined earlier.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -281,7 +281,7 @@ Now, we need to set up the trainer. The trainer needs to take in `train_program`,
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -297,14 +297,14 @@ We will demonstrate two event handlers here. Please feel free to modify on the J
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -331,14 +331,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -390,7 +390,7 @@ Usually, with MNIST data, the softmax regression model achieves an accuracy arou
## Application
After training, users can use the trained model to classify images. The following code shows how to run inference on MNIST images through `fluid.Inferencer`.
After training, users can use the trained model to classify images. The following code shows how to run inference on MNIST images through `fluid.contrib.inferencer.Inferencer`.
### Create Inferencer
......@@ -398,7 +398,7 @@ The `Inferencer` takes an `infer_func` and `param_path` to set up the network and
We can simply plug in the classifier defined earlier.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -316,7 +316,7 @@ test_reader = paddle.batch(
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -332,14 +332,14 @@ The Fluid API provides a hook for callback functions during training. Users can
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -370,14 +370,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -422,7 +422,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
## Apply the Model
The trained model can be used to classify images of handwritten digits. The program below shows how to run inference through the `fluid.Inferencer` interface.
The trained model can be used to classify images of handwritten digits. The program below shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface.
### Inference Configuration
......@@ -430,7 +430,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
We can simply plug in the classifier defined earlier.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -323,7 +323,7 @@ Now, we need to set up the trainer. The trainer needs to take in `train_program`,
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -339,14 +339,14 @@ We will demonstrate two event handlers here. Please feel free to modify on the J
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -373,14 +373,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -432,7 +432,7 @@ Usually, with MNIST data, the softmax regression model achieves an accuracy arou
## Application
After training, users can use the trained model to classify images. The following code shows how to run inference on MNIST images through `fluid.Inferencer`.
After training, users can use the trained model to classify images. The following code shows how to run inference on MNIST images through `fluid.contrib.inferencer.Inferencer`.
### Create Inferencer
......@@ -440,7 +440,7 @@ The `Inferencer` takes an `infer_func` and `param_path` to set up the network and
We can simply plug in the classifier defined earlier.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -5,6 +5,16 @@ import numpy as np
import paddle
import paddle.fluid as fluid
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
def softmax_regression():
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
......@@ -77,7 +87,7 @@ def main():
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
# Save the parameter into a directory. The Inferencer can load the parameters from it to do infer
......@@ -86,14 +96,14 @@ def main():
lists = []
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps with train program return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (event.step, event.epoch,
event.metrics[0]))
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -125,7 +135,7 @@ def main():
cur_dir = os.path.dirname(os.path.realpath(__file__))
img = load_image(cur_dir + '/image/infer_3.png')
inferencer = fluid.Inferencer(
inferencer = Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
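The recognize_digits hunks end right after the `Inferencer` is constructed. For context, here is a hedged sketch of the inference step that follows in the book: it assumes `load_image` (referenced in train.py above) returns a `[1, 1, 28, 28]` float32 array and that `Inferencer.infer` accepts a feed dict keyed by the data layer name `img`; neither detail is verified against this exact revision.

```python
import os
import numpy

# Load one handwritten digit; load_image is the helper referenced in
# train.py above and is assumed to normalize the image to [1, 1, 28, 28].
cur_dir = os.path.dirname(os.path.realpath(__file__))
img = load_image(cur_dir + '/image/infer_3.png')

# Feed the image under the data layer name 'img' (assumed interface).
results = inferencer.infer({'img': img})

# results[0] holds the ten class probabilities for the single input image;
# after sorting, the last index is the most probable digit.
lab = numpy.argsort(results)
print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1])
```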
......@@ -346,7 +346,7 @@ def optimizer_program():
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -394,12 +394,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -417,7 +417,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -426,7 +426,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -475,7 +475,7 @@ Test with Pass 0, Loss 1.1, Acc 0.6
## Apply the Model
The trained model can be used to classify images. The program below shows how to run inference through the `fluid.Inferencer` interface; you can uncomment lines to change which model is loaded.
The trained model can be used to classify images. The program below shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface; you can uncomment lines to change which model is loaded.
### Generate Input Data for Inference
......@@ -513,7 +513,7 @@ img = load_image(cur_dir + '/image/dog.png')
Now we are ready to make predictions.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# inference
......
......@@ -348,7 +348,7 @@ Here we specify `Adam` optimization algorithm via `fluid.optimizer`.
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -392,12 +392,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -415,7 +415,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -424,7 +424,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -474,7 +474,7 @@ Figure 12. The error rate of VGG model on CIFAR10
## Application
After training is completed, users can use the trained model to classify images. The following code shows how to run inference through the `fluid.Inferencer` interface. You can uncomment some of the lines below to change the model name.
After training is completed, users can use the trained model to classify images. The following code shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface. You can uncomment some of the lines below to change the model name.
### Generate input data for inferring
......@@ -512,7 +512,7 @@ We can simply plug in the inference_program defined earlier.
Now we are ready to do inference.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
......
......@@ -388,7 +388,7 @@ def optimizer_program():
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -436,12 +436,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -459,7 +459,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -468,7 +468,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# Test against with the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -517,7 +517,7 @@ Test with Pass 0, Loss 1.1, Acc 0.6
## Apply the Model
The trained model can be used to classify images. The program below shows how to run inference through the `fluid.Inferencer` interface; you can uncomment lines to change which model is loaded.
The trained model can be used to classify images. The program below shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface; you can uncomment lines to change which model is loaded.
### Generate Input Data for Inference
......@@ -555,7 +555,7 @@ img = load_image(cur_dir + '/image/dog.png')
Now we are ready to make predictions.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# inference
......
......@@ -390,7 +390,7 @@ Here we specify `Adam` optimization algorithm via `fluid.optimizer`.
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -434,12 +434,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -457,7 +457,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -466,7 +466,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
# Test against with the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -516,7 +516,7 @@ Figure 12. The error rate of VGG model on CIFAR10
## Application
After training is completed, users can use the trained model to classify images. The following code shows how to run inference through the `fluid.Inferencer` interface. You can uncomment some of the lines below to change the model name.
After training is completed, users can use the trained model to classify images. The following code shows how to run inference through the `fluid.contrib.inferencer.Inferencer` interface. You can uncomment some of the lines below to change the model name.
### Generate input data for inferring
......@@ -554,7 +554,7 @@ We can simply plug in the inference_program defined earlier.
Now we are ready to do inference.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
......
......@@ -13,6 +13,17 @@
# limitations under the License.
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
__all__ = ['resnet_cifar10']
......
......@@ -19,6 +19,16 @@ import paddle.fluid as fluid
import numpy
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
from vgg import vgg_bn_drop
from resnet import resnet_cifar10
......@@ -58,7 +68,7 @@ def train(use_cuda, train_program, params_dirname):
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -67,7 +77,7 @@ def train(use_cuda, train_program, params_dirname):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -77,7 +87,7 @@ def train(use_cuda, train_program, params_dirname):
trainer.save_params(params_dirname)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, optimizer_func=optimizer_program, place=place)
trainer.train(
......@@ -89,7 +99,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Prepare testing data.
......
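The image_classification train.py hunk defines `train(...)` and `infer(...)` but the diff cuts off before the code that invokes them. A hedged sketch of the usual driver follows; the `train_program` and `inference_program` names stand in for whatever callables this chapter defines, and the CUDA guard is a common pattern rather than something shown in this commit.

```python
def main(use_cuda):
    # Skip the GPU path when this fluid build has no CUDA support.
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    # Matches the params_dirname used throughout the hunks above.
    params_dirname = "image_classification_resnet.inference.model"
    train(
        use_cuda=use_cuda,
        train_program=train_program,          # chapter-defined network + loss
        params_dirname=params_dirname)
    infer(
        use_cuda=use_cuda,
        inference_program=inference_program,  # chapter-defined forward pass
        params_dirname=params_dirname)


if __name__ == '__main__':
    main(use_cuda=False)
```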
......@@ -14,6 +14,17 @@
import paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
__all__ = ['vgg_bn_drop']
......
......@@ -309,7 +309,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
# We output cost every 10 steps.
if event.step % 10 == 0:
outs = trainer.test(
......@@ -329,7 +329,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place)
......@@ -360,7 +360,7 @@ Step 20: Average Cost 5.766995
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -321,7 +321,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
......@@ -341,7 +341,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
# Note here we need to choose more sophisticated optimizer
# such as AdaGrad with a decay rate. The normal SGD converges
......@@ -378,7 +378,7 @@ We can use our trained model to predict the next word given its previous N-gram.
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -351,7 +351,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
# We output cost every 10 steps.
if event.step % 10 == 0:
outs = trainer.test(
......@@ -371,7 +371,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place)
......@@ -402,7 +402,7 @@ Step 20: Average Cost 5.766995
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -363,7 +363,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
......@@ -383,7 +383,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program,
# Note here we need to choose more sophisticated optimizer
# such as AdaGrad with a decay rate. The normal SGD converges
......@@ -420,7 +420,7 @@ We can use our trained model to predict the next word given its previous N-gram.
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -14,6 +14,17 @@
from __future__ import print_function
import paddle.v2 as paddle
import paddle.fluid as fluid
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
import numpy
import sys
from functools import partial
......@@ -97,7 +108,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
......@@ -116,7 +127,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program,
# optimizer=fluid.optimizer.SGD(learning_rate=0.001),
optimizer_func=optimizer_func,
......@@ -131,7 +142,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
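Several word2vec hunks end on the truncated comment about setting up four LoD tensors. A hedged sketch of that step is shown below: the integer word ids are placeholders (the real example looks them up in the PTB `word_dict`), and the feed-dict call on `inferencer` is assumed from the older book code rather than shown in this diff.

```python
import numpy
import paddle.fluid as fluid

# Each input is a single sequence containing exactly one word id.
lod = [[1]]
first_word = fluid.create_lod_tensor([[211]], lod, place)  # placeholder ids
second_word = fluid.create_lod_tensor([[6]], lod, place)
third_word = fluid.create_lod_tensor([[96]], lod, place)
fourth_word = fluid.create_lod_tensor([[4]], lod, place)

# Feed names match the feed_order used in the hunks above.
result = inferencer.infer({
    'firstw': first_word,
    'secondw': second_word,
    'thirdw': third_word,
    'fourthw': fourth_word
})
print("probability distribution over the next word:", numpy.array(result[0]))
```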
......@@ -409,7 +409,7 @@ test_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -437,7 +437,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -473,7 +473,7 @@ trainer.train(
Pass `inference_program` and `params_dirname` to initialize an inferencer; `params_dirname` is where the parameters from training are stored.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -382,7 +382,7 @@ test_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specifies the optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -412,7 +412,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -451,7 +451,7 @@ trainer.train(
Initialize the Inferencer with `inference_program` and `params_dirname`, which is where we saved the parameters during training.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -451,7 +451,7 @@ test_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -479,7 +479,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -515,7 +515,7 @@ trainer.train(
Pass `inference_program` and `params_dirname` to initialize an inferencer; `params_dirname` is where the parameters from training are stored.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -424,7 +424,7 @@ test_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specifies the optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -454,7 +454,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -493,7 +493,7 @@ trainer.train(
Initialize the Inferencer with `inference_program` and `params_dirname`, which is where we saved the parameters during training.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -20,6 +20,15 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
IS_SPARSE = True
USE_GPU = False
......@@ -160,7 +169,7 @@ def optimizer_func():
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
feed_order = [
......@@ -169,7 +178,7 @@ def train(use_cuda, train_program, params_dirname):
]
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
avg_cost_set = trainer.test(
......@@ -202,7 +211,7 @@ def train(use_cuda, train_program, params_dirname):
def infer(use_cuda, inference_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
# Use the first data from paddle.dataset.movielens.test() as input.
......
......@@ -249,7 +249,7 @@ train_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -272,7 +272,7 @@ feed_order = ['words', 'label']
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
......@@ -300,7 +300,7 @@ trainer.train(
Pass `inference_program` and `params_dirname` to initialize an inferencer; `params_dirname` is where the parameters from training are stored.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=partial(inference_program, word_dict), param_path=params_dirname, place=place)
```
......
......@@ -255,7 +255,7 @@ train_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specifies the optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -279,7 +279,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
......@@ -307,7 +307,7 @@ trainer.train(
Initialize the Inferencer with `inference_program` and `params_dirname`, which is where we saved the parameters during training.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -291,7 +291,7 @@ train_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -314,7 +314,7 @@ feed_order = ['words', 'label']
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
......@@ -342,7 +342,7 @@ trainer.train(
Pass `inference_program` and `params_dirname` to initialize an inferencer; `params_dirname` is where the parameters from training are stored.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=partial(inference_program, word_dict), param_path=params_dirname, place=place)
```
......
......@@ -297,7 +297,7 @@ train_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specifies the optimizer function.
```python
trainer = fluid.Trainer(
trainer = fluid.contrib.trainer.Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -321,7 +321,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
......@@ -349,7 +349,7 @@ trainer.train(
Initialize the Inferencer with `inference_program` and `params_dirname`, which is where we saved the parameters during training.
```python
inferencer = fluid.Inferencer(
inferencer = fluid.contrib.inferencer.Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -19,6 +19,17 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -83,7 +94,7 @@ def train(use_cuda, train_program, params_dirname):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -91,7 +102,7 @@ def train(use_cuda, train_program, params_dirname):
feed_order = ['words', 'label']
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -102,7 +113,7 @@ def train(use_cuda, train_program, params_dirname):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
elif isinstance(event, fluid.EndEpochEvent):
elif isinstance(event, EndEpochEvent):
trainer.save_params(params_dirname)
trainer.train(
......@@ -116,7 +127,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -19,6 +19,17 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -100,7 +111,7 @@ def train(use_cuda, train_program, params_dirname):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -108,7 +119,7 @@ def train(use_cuda, train_program, params_dirname):
feed_order = ['words', 'label']
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -119,7 +130,7 @@ def train(use_cuda, train_program, params_dirname):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
elif isinstance(event, fluid.EndEpochEvent):
elif isinstance(event, EndEpochEvent):
trainer.save_params(params_dirname)
trainer.train(
......@@ -133,7 +144,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -19,6 +19,17 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
import sys
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -91,7 +102,7 @@ def train(use_cuda, train_program, params_dirname):
test_reader = paddle.batch(
paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -99,7 +110,7 @@ def train(use_cuda, train_program, params_dirname):
feed_order = ['words', 'label']
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -110,7 +121,7 @@ def train(use_cuda, train_program, params_dirname):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, map(np.array, event.metrics)))
elif isinstance(event, fluid.EndEpochEvent):
elif isinstance(event, EndEpochEvent):
trainer.save_params(params_dirname)
trainer.train(
......@@ -124,7 +135,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
word_dict = paddle.dataset.imdb.word_dict()
inferencer = fluid.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
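The three understand_sentiment train.py variants above all end just after the `Inferencer` is constructed. For completeness, here is a hedged sketch of how a raw review is turned into the `words` LoD tensor that this Inferencer expects; it follows the older book example, so the `<unk>` handling and the feed-dict call are assumptions rather than part of this commit.

```python
import paddle.fluid as fluid

# Tokenize a couple of raw reviews and map each token to its id in the
# imdb word_dict loaded earlier; unknown tokens fall back to '<unk>'.
reviews_str = ['read the book forget the movie', 'this is a great movie']
reviews = [review.split() for review in reviews_str]

UNK = word_dict['<unk>']
lod = [[word_dict.get(word, UNK) for word in review] for review in reviews]
base_shape = [[len(seq) for seq in lod]]       # one level of sequence lengths

# Build a LoD tensor and feed it under the name used by feed_order above.
tensor_words = fluid.create_lod_tensor(lod, base_shape, place)
results = inferencer.infer({'words': tensor_words})

for review, probs in zip(reviews_str, results[0]):
    # probs holds the [negative, positive] scores for this review.
    print("Positive probability of '%s': %f" % (review, probs[1]))
```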