Commit 89c0c006 authored by: R root

delete untracked file

Parent: 1685f671
......@@ -104,6 +104,16 @@ import paddle
import paddle.fluid as fluid
import numpy
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
We load the [UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing) through the `uci_housing` module.
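Before the trainer is set up, the dataset is usually wrapped into shuffled, batched readers. A minimal sketch, assuming the `paddle.dataset.uci_housing` reader API of this Paddle version (the batch size and shuffle buffer below are illustrative):
```python
import paddle

BATCH_SIZE = 20  # illustrative mini-batch size

# Shuffle samples within a small buffer, then group them into mini-batches.
train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500),
    batch_size=BATCH_SIZE)

test_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=BATCH_SIZE)
```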
......@@ -167,7 +177,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer reads in a training program and a few other required parameters:
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -186,22 +196,28 @@ feed_order=['x', 'y']
# Specify the directory to save the parameters
params_dirname = "fit_a_line.inference.model"
# Plot data
from paddle.utils import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
step = 0
# event_handler prints training and testing info
def event_handler(event):
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
print("%s, Step %d, Cost %f" % (train_title, step, event.metrics[0]))
plot_cost.append(train_title, step, event.metrics[0])
if step % 100 == 0: # record a test cost every 100 batches
test_metrics = trainer.test(
reader=test_reader, feed_order=feed_order)
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
plot_cost.append(test_title, step, test_metrics[0])
plot_cost.plot()
if test_metrics[0] < 10.0:
# If the cost is low enough, we can stop the training.
......@@ -209,7 +225,7 @@ def event_handler(event):
trainer.stop()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for the inferences later
if params_dirname is not None:
......@@ -226,7 +242,7 @@ def event_handler(event):
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
event_handler=event_handler_plot,
feed_order=feed_order)
```
......@@ -249,7 +265,7 @@ def inference_program():
The inferencer reads the trained model from `params_dirname` and uses it to make predictions on data it has never seen before.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -115,6 +115,16 @@ import paddle
import paddle.fluid as fluid
import numpy
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
We encapsulated the [UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing) in our Python module `uci_housing`. This module can
......@@ -180,7 +190,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer will take the `train_program` as input.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -202,24 +212,28 @@ Moreover, an event handler is provided to print the training progress:
# Specify the directory to save the parameters
params_dirname = "fit_a_line.inference.model"
# Plot data
from paddle.utils import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
step = 0
# event_handler prints training and testing info
def event_handler(event):
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
print("%s, Step %d, Cost %f" % (train_title, step, event.metrics[0]))
plot_cost.append(train_title, step, event.metrics[0])
if step % 100 == 0: # record a test cost every 100 batches
test_metrics = trainer.test(
reader=test_reader, feed_order=feed_order)
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
plot_cost.append(test_title, step, test_metrics[0])
plot_cost.plot()
if test_metrics[0] < 10.0:
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
# If the cost is low enough, we can stop the training.
print('loss is less than 10.0, stop')
trainer.stop()
......@@ -244,7 +258,7 @@ We now can start training by calling `trainer.train()`.
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
event_handler=event_handler_plot,
feed_order=feed_order)
```
......@@ -272,7 +286,7 @@ def inference_program():
Inferencer will load the trained model from `params_dirname` and use it to run inference on unseen data.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -146,6 +146,16 @@ import paddle
import paddle.fluid as fluid
import numpy
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
We load the [UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing) through the `uci_housing` module.
......@@ -209,7 +219,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer reads in a training program and a few other required parameters:
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -228,22 +238,28 @@ feed_order=['x', 'y']
# Specify the directory to save the parameters
params_dirname = "fit_a_line.inference.model"
# Plot data
from paddle.utils import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
step = 0
# event_handler prints training and testing info
def event_handler(event):
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
print("%s, Step %d, Cost %f" % (train_title, step, event.metrics[0]))
plot_cost.append(train_title, step, event.metrics[0])
if step % 100 == 0: # record a test cost every 100 batches
test_metrics = trainer.test(
reader=test_reader, feed_order=feed_order)
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
plot_cost.append(test_title, step, test_metrics[0])
plot_cost.plot()
if test_metrics[0] < 10.0:
# If the cost is low enough, we can stop the training.
......@@ -251,7 +267,7 @@ def event_handler(event):
trainer.stop()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
if event.epoch % 10 == 0:
# We can save the trained parameters for the inferences later
if params_dirname is not None:
......@@ -268,7 +284,7 @@ def event_handler(event):
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
event_handler=event_handler_plot,
feed_order=feed_order)
```
......@@ -291,7 +307,7 @@ def inference_program():
The inferencer reads the trained model from `params_dirname` and uses it to make predictions on data it has never seen before.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -157,6 +157,16 @@ import paddle
import paddle.fluid as fluid
import numpy
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
We encapsulated the [UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing) in our Python module `uci_housing`. This module can
......@@ -222,7 +232,7 @@ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
The trainer will take the `train_program` as input.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
place=place,
optimizer_func=optimizer_program)
......@@ -244,24 +254,28 @@ Moreover, an event handler is provided to print the training progress:
# Specify the directory to save the parameters
params_dirname = "fit_a_line.inference.model"
# Plot data
from paddle.utils import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
step = 0
# event_handler prints training and testing info
def event_handler(event):
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
print("%s, Step %d, Cost %f" % (train_title, step, event.metrics[0]))
plot_cost.append(train_title, step, event.metrics[0])
if step % 100 == 0: # record a test cost every 100 batches
test_metrics = trainer.test(
reader=test_reader, feed_order=feed_order)
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
plot_cost.append(test_title, step, test_metrics[0])
plot_cost.plot()
if test_metrics[0] < 10.0:
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
# If the cost is low enough, we can stop the training.
print('loss is less than 10.0, stop')
trainer.stop()
......@@ -286,7 +300,7 @@ We now can start training by calling `trainer.train()`.
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
event_handler=event_handler_plot,
feed_order=feed_order)
```
......@@ -314,7 +328,7 @@ def inference_program():
Inferencer will load the trained model from `params_dirname` and use it to run inference on unseen data.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
batch_size = 10
......
......@@ -69,25 +69,26 @@ feed_order = ['x', 'y']
# Specify the directory to save the parameters
params_dirname = "fit_a_line.inference.model"
from paddle.utils import Ploter
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)
step = 0
# event_handler prints training and testing info
def event_handler(event):
def event_handler_plot(event):
global step
if isinstance(event, EndStepEvent):
if step % 10 == 0: # record a train cost every 10 batches
print("%s, Step %d, Cost %f" %
(train_title, step, event.metrics[0]))
plot_cost.append(train_title, step, event.metrics[0])
if step % 100 == 0: # record a test cost every 100 batches
test_metrics = trainer.test(
reader=test_reader, feed_order=feed_order)
print("%s, Step %d, Cost %f" % (test_title, step, test_metrics[0]))
plot_cost.append(test_title, step, test_metrics[0])
plot_cost.plot()
if test_metrics[0] < 10.0:
# If the cost is low enough, we can stop the training.
print('loss is less than 10.0, stop')
......@@ -105,7 +106,7 @@ def event_handler(event):
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
event_handler=event_handler_plot,
feed_order=feed_order)
......
......@@ -160,6 +160,15 @@ PaddlePaddle在API中提供了自动加载[MNIST](http://yann.lecun.com/exdb/mni
import paddle
import paddle.fluid as fluid
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
### Program Functions Configuration
......@@ -278,7 +287,7 @@ test_reader = paddle.batch(
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -294,14 +303,14 @@ Fluid API 在训练期间为回调函数提供了一个钩子。用户能够通
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -332,14 +341,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -392,7 +401,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
We can simply plug in the classifier defined earlier here.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -162,6 +162,15 @@ A PaddlePaddle program starts from importing the API package:
import paddle
import paddle.fluid as fluid
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
### Program Functions Configuration
......@@ -281,7 +290,7 @@ Now, we need to setup the trainer. The trainer need to take in `train_program`,
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -297,14 +306,14 @@ We will demonstrate two event handlers here. Please feel free to modify on the J
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -331,14 +340,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -398,7 +407,7 @@ The `Inferencer` takes an `infer_func` and `param_path` to setup the network and
We can simply plug-in the classifier defined earlier here.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -202,6 +202,15 @@ PaddlePaddle在API中提供了自动加载[MNIST](http://yann.lecun.com/exdb/mni
import paddle
import paddle.fluid as fluid
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
### Program Functions Configuration
......@@ -320,7 +329,7 @@ test_reader = paddle.batch(
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -336,14 +345,14 @@ Fluid API 在训练期间为回调函数提供了一个钩子。用户能够通
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -374,14 +383,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -434,7 +443,7 @@ Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338
We can simply plug in the classifier defined earlier here.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -204,6 +204,15 @@ A PaddlePaddle program starts from importing the API package:
import paddle
import paddle.fluid as fluid
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
### Program Functions Configuration
......@@ -323,7 +332,7 @@ Now, we need to setup the trainer. The trainer need to take in `train_program`,
use_cuda = False # set to True if training with GPU
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_program)
```
......@@ -339,14 +348,14 @@ We will demonstrate two event handlers here. Please feel free to modify on the J
params_dirname = "recognize_digits_network.inference.model"
lists = []
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
print("Pass %d, Batch %d, Cost %f" % (
event.step, event.epoch, event.metrics[0]))
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, acc = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
......@@ -373,14 +382,14 @@ lists = []
# event_handler to plot a figure
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 100 == 0:
# event.metrics maps to the train program's return arguments.
# event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# save parameters
trainer.save_params(params_dirname)
......@@ -440,7 +449,7 @@ The `Inferencer` takes an `infer_func` and `param_path` to setup the network and
We can simply plug-in the classifier defined earlier here.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
# infer_func=softmax_regression, # uncomment for softmax regression
# infer_func=multilayer_perceptron, # uncomment for MLP
infer_func=convolutional_neural_network, # uncomment for LeNet5
......
......@@ -169,6 +169,15 @@ import paddle.fluid as fluid
import numpy
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
In this tutorial we provide the configurations of two models: VGG and ResNet.
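Both configurations are built by stacking convolution and pooling stages. As a rough sketch of one VGG-style stage expressed with `fluid.layers` primitives (the helper name and filter settings here are illustrative, not the exact code of this chapter):
```python
import paddle.fluid as fluid

def conv_pool_stage(ipt, num_filters):
    # A 3x3 convolution with ReLU, followed by 2x2 max pooling.
    conv = fluid.layers.conv2d(
        input=ipt, num_filters=num_filters, filter_size=3, padding=1, act='relu')
    return fluid.layers.pool2d(
        input=conv, pool_size=2, pool_stride=2, pool_type='max')
```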
......@@ -346,7 +355,7 @@ def optimizer_program():
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -394,12 +403,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -417,7 +426,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -426,7 +435,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -513,7 +522,7 @@ img = load_image(cur_dir + '/image/dog.png')
Now we are ready to do inference.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# inference
......
......@@ -172,6 +172,15 @@ import paddle.fluid as fluid
import numpy
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
Now we are going to walk you through the implementations of the VGG and ResNet.
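The idea that distinguishes ResNet from VGG is the shortcut (residual) connection. A hedged sketch of a basic residual block in `fluid.layers` (the helper name is illustrative, and it assumes the input already has `num_filters` channels so the element-wise addition is shape-compatible):
```python
import paddle.fluid as fluid

def basic_residual_block(ipt, num_filters):
    # Two 3x3 convolutions; the input is added back before the final ReLU.
    conv1 = fluid.layers.conv2d(
        input=ipt, num_filters=num_filters, filter_size=3, padding=1, act='relu')
    conv2 = fluid.layers.conv2d(
        input=conv1, num_filters=num_filters, filter_size=3, padding=1, act=None)
    return fluid.layers.elementwise_add(x=ipt, y=conv2, act='relu')
```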
......@@ -348,7 +357,7 @@ Here we specify `Adam` optimization algorithm via `fluid.optimizer`.
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -392,12 +401,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -415,7 +424,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -424,7 +433,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -512,7 +521,7 @@ We can simply plug-in the inference_program defined earlier here.
Now we are ready to do inference.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
......
......@@ -211,6 +211,15 @@ import paddle.fluid as fluid
import numpy
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
In this tutorial we provide the configurations of two models: VGG and ResNet.
......@@ -388,7 +397,7 @@ def optimizer_program():
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -436,12 +445,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -459,7 +468,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -468,7 +477,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -555,7 +564,7 @@ img = load_image(cur_dir + '/image/dog.png')
Now we are ready to do inference.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# inference
......
......@@ -214,6 +214,15 @@ import paddle.fluid as fluid
import numpy
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
Now we are going to walk you through the implementations of the VGG and ResNet.
......@@ -390,7 +399,7 @@ Here we specify `Adam` optimization algorithm via `fluid.optimizer`.
```python
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_program,
place=place)
......@@ -434,12 +443,12 @@ cost_ploter = Ploter(train_title, test_title)
step = 0
def event_handler_plot(event):
global step
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if step % 1 == 0:
cost_ploter.append(train_title, step, event.metrics[0])
cost_ploter.plot()
step += 1
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader,
feed_order=['pixel', 'label'])
......@@ -457,7 +466,7 @@ params_dirname = "image_classification_resnet.inference.model"
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 100 == 0:
print("\nPass %d, Batch %d, Cost %f, Acc %f" %
(event.step, event.epoch, event.metrics[0],
......@@ -466,7 +475,7 @@ def event_handler(event):
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.contrib.trainer.EndEpochEvent):
if isinstance(event, EndEpochEvent):
# Test against the test dataset to get accuracy.
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])
......@@ -554,7 +563,7 @@ We can simply plug-in the inference_program defined earlier here.
Now we are ready to do inference.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
label_list = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
......
......@@ -211,6 +211,15 @@ import os
import six
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
Then, define the parameters:
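For orientation, these are the N-gram model's hyperparameters. The values below match the snippet shown in a later hunk of this commit, except `EMB_SIZE`, which is an assumed example:
```python
EMB_SIZE = 32        # word embedding dimension (assumed value)
HIDDEN_SIZE = 256    # size of the hidden layer
N = 5                # width of the N-gram window
BATCH_SIZE = 100     # mini-batch size
use_cuda = False     # set to True if training with GPU
```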
......@@ -310,7 +319,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
# We output cost every 10 steps.
if event.step % 10 == 0:
outs = trainer.test(
......@@ -330,7 +339,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place)
......@@ -361,7 +370,7 @@ Step 20: Average Cost 5.766995
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -224,6 +224,15 @@ import os
import six
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
- Configure parameters and build word dictionary.
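As a minimal sketch of the dictionary step (the two lines below also appear verbatim in a later hunk of this commit):
```python
import paddle

# Build the word dictionary from the imikolov (PTB) dataset and record its size.
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
```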
......@@ -322,7 +331,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
......@@ -342,9 +351,9 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
# Note here we need to choose more sophisticated optimizer
# Note here we need to choose more sophisticated optimizer
# such as AdaGrad with a decay rate. The normal SGD converges
# very slowly.
# optimizer=fluid.optimizer.SGD(learning_rate=0.001),
......@@ -379,7 +388,7 @@ We can use our trained model to predict the next word given its previous N-gram.
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -253,6 +253,15 @@ import os
import six
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
Then, define the parameters:
......@@ -352,7 +361,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
# We output cost every 10 steps.
if event.step % 10 == 0:
outs = trainer.test(
......@@ -372,7 +381,7 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
optimizer_func=optimizer_func,
place=place)
......@@ -403,7 +412,7 @@ Step 20: Average Cost 5.766995
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -266,6 +266,15 @@ import os
import six
import sys
from __future__ import print_function
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
```
- Configure parameters and build word dictionary.
......@@ -364,7 +373,7 @@ def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
outs = trainer.test(
reader=test_reader,
feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
......@@ -384,9 +393,9 @@ def train(use_cuda, train_program, params_dirname):
if math.isnan(avg_cost):
sys.exit("got NaN loss, training failed.")
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program,
# Note here we need to choose more sophisticated optimizer
# Note here we need to choose more sophisticated optimizer
# such as AdaGrad with a decay rate. The normal SGD converges
# very slowly.
# optimizer=fluid.optimizer.SGD(learning_rate=0.001),
......@@ -421,7 +430,7 @@ We can use our trained model to predict the next word given its previous N-gram.
```python
def infer(use_cuda, inference_program, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=inference_program, param_path=params_dirname, place=place)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
......
......@@ -38,7 +38,7 @@ HIDDEN_SIZE = 256
N = 5
BATCH_SIZE = 100
use_cuda = False # set to True if training with GPU
use_cuda = True # set to True if training with GPU
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
......
......@@ -225,6 +225,15 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
IS_SPARSE = True
USE_GPU = False
......@@ -409,7 +418,7 @@ test_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -437,7 +446,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -473,7 +482,7 @@ trainer.train(
Initialize the inferencer by passing in `inference_program` and `params_dirname`; `params_dirname` is where the parameters from training are saved.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -193,6 +193,15 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
IS_SPARSE = True
USE_GPU = False
......@@ -382,7 +391,7 @@ test_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specify optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -412,7 +421,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -451,7 +460,7 @@ trainer.train(
Initialize Inferencer with `inference_program` and `params_dirname` which is where we save params from training.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -267,6 +267,15 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
IS_SPARSE = True
USE_GPU = False
......@@ -451,7 +460,7 @@ test_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -479,7 +488,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -515,7 +524,7 @@ trainer.train(
Initialize the inferencer by passing in `inference_program` and `params_dirname`; `params_dirname` is where the parameters from training are saved.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -235,6 +235,15 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
IS_SPARSE = True
USE_GPU = False
......@@ -424,7 +433,7 @@ test_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specify optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
```
......@@ -454,7 +463,7 @@ plot_cost = Ploter(test_title)
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
......@@ -493,7 +502,7 @@ trainer.train(
Initialize Inferencer with `inference_program` and `params_dirname` which is where we save params from training.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
inference_program, param_path=params_dirname, place=place)
```
......
......@@ -112,6 +112,15 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -249,7 +258,7 @@ train_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -272,7 +281,7 @@ feed_order = ['words', 'label']
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
......@@ -300,7 +309,7 @@ trainer.train(
Initialize the inferencer by passing in `inference_program` and `params_dirname`; `params_dirname` is where the parameters from training are saved.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict), param_path=params_dirname, place=place)
```
......
......@@ -108,6 +108,15 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -255,7 +264,7 @@ train_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specify optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -279,7 +288,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
......@@ -307,7 +316,7 @@ trainer.train(
Initialize Inferencer with `inference_program` and `params_dirname` which is where we save params from training.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -154,6 +154,15 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -291,7 +300,7 @@ train_reader = paddle.batch(
The trainer needs a training program and an optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -314,7 +323,7 @@ feed_order = ['words', 'label']
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
......@@ -342,7 +351,7 @@ trainer.train(
Initialize the inferencer by passing in `inference_program` and `params_dirname`; `params_dirname` is where the parameters from training are saved.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict), param_path=params_dirname, place=place)
```
......
......@@ -150,6 +150,15 @@ import paddle
import paddle.fluid as fluid
from functools import partial
import numpy as np
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
CLASS_DIM = 2
EMB_DIM = 128
......@@ -297,7 +306,7 @@ train_reader = paddle.batch(
Create a trainer that takes `train_program` as input and specify optimizer function.
```python
trainer = fluid.contrib.trainer.Trainer(
trainer = Trainer(
train_func=partial(train_program, word_dict),
place=place,
optimizer_func=optimizer_func)
......@@ -321,7 +330,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
params_dirname = "understand_sentiment_conv.inference.model"
def event_handler(event):
if isinstance(event, fluid.contrib.trainer.EndStepEvent):
if isinstance(event, EndStepEvent):
print("Step {0}, Epoch {1} Metrics {2}".format(
event.step, event.epoch, list(map(np.array, event.metrics))))
......@@ -349,7 +358,7 @@ trainer.train(
Initialize Inferencer with `inference_program` and `params_dirname` which is where we save params from training.
```python
inferencer = fluid.contrib.inferencer.Inferencer(
inferencer = Inferencer(
infer_func=partial(inference_program, word_dict),
param_path=params_dirname,
place=place)
......
......@@ -150,6 +150,15 @@ import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
from functools import partial
import os
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
......@@ -338,7 +347,7 @@ train_reader = paddle.batch(
```python
is_sparse = False
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, is_sparse),
place=place,
optimizer_func=optimizer_func)
......@@ -359,7 +368,7 @@ feed_order = [
```python
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
......
......@@ -180,6 +180,15 @@ import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
from functools import partial
import os
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
......@@ -374,7 +383,7 @@ Create a trainer that takes `train_program` as input and specify optimizer funct
```python
is_sparse = False
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, is_sparse),
place=place,
optimizer_func=optimizer_func)
......@@ -397,7 +406,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
```python
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
......
......@@ -192,6 +192,15 @@ import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
from functools import partial
import os
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
......@@ -380,7 +389,7 @@ train_reader = paddle.batch(
```python
is_sparse = False
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, is_sparse),
place=place,
optimizer_func=optimizer_func)
......@@ -401,7 +410,7 @@ feed_order = [
```python
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
......
......@@ -222,6 +222,15 @@ import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
from functools import partial
import os
try:
from paddle.fluid.contrib.trainer import *
from paddle.fluid.contrib.inferencer import *
except ImportError:
print(
"In the fluid 1.0, the trainer and inferencer are moving to paddle.fluid.contrib",
file=sys.stderr)
from paddle.fluid.trainer import *
from paddle.fluid.inferencer import *
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
......@@ -416,7 +425,7 @@ Create a trainer that takes `train_program` as input and specify optimizer funct
```python
is_sparse = False
trainer = fluid.Trainer(
trainer = Trainer(
train_func=partial(train_program, is_sparse),
place=place,
optimizer_func=optimizer_func)
......@@ -439,7 +448,7 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
```python
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if isinstance(event, EndStepEvent):
if event.step % 10 == 0:
print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
......