Unverified commit 787a4d8e, authored by Chen Weihang, committed by GitHub

Update save/load configs & 2.0 examples (#2553)

* update configs & 2.0 examples

* move apis directory

* update api names
Parent 256421b2
load
----

.. py:function:: paddle.load(model_path, config=None)

:api_attr: imperative programming mode (dynamic graph)

...

Parameters:
    - **model_path** (str) – The file prefix under which the ``state_dict`` was saved. The path should not include the suffix ``.pdparams`` or ``.pdopt``.
    - **config** (SaveLoadConfig, optional) – A :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` object that specifies additional configuration options, mainly used for compatibility with models stored in the ``paddle.io.save_inference_model`` format. Defaults to ``None``.

Returns: two ``dict`` objects, namely the model-parameter ``dict`` and the optimizer-parameter ``dict`` restored from file; if only one of the two storage files is found, the other value is ``None``.
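
A minimal usage sketch, assuming the companion ``paddle.save(state_dict, model_path)`` interface writes the ``.pdparams`` file that this function reads (the network and path names below are illustrative):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    # enable dygraph mode
    paddle.disable_static()

    net = nn.Linear(8, 8)

    # assumption: the companion save API writes "linear.example.pdparams";
    # no ".pdopt" file is written here, so the optimizer dict comes back as None
    model_path = "linear.example"
    paddle.save(net.state_dict(), model_path)

    # restore both dicts; the one whose file is missing is returned as None
    param_state, opt_state = paddle.load(model_path)
    net.set_dict(param_state)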
SaveLoadConfig
--------------

...

1. Using ``SaveLoadConfig`` when saving a model

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    class SimpleNet(nn.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = nn.Linear(in_size, out_size)

        @paddle.jit.to_static
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    paddle.disable_static()

    # train model
    net = SimpleNet(8, 8)
    adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
    x = paddle.randn([4, 8], 'float32')
    for i in range(10):
        out = net(x)
        loss = paddle.tensor.mean(out)
        loss.backward()
        adam.step()
        adam.clear_grad()

    # use SaveLoadConfig when saving the model
    model_path = "simplenet.example.model"
    config = paddle.SaveLoadConfig()
    config.model_filename = "__simplenet__"
    paddle.jit.save(
        layer=net,
        model_path=model_path,
        config=config)

2. Using ``SaveLoadConfig`` when loading a model

.. code-block:: python

    import paddle

    # enable dygraph mode
    paddle.disable_static()

    # use SaveLoadConfig when loading the model
    model_path = "simplenet.example.model"
    config = paddle.SaveLoadConfig()
    config.model_filename = "__simplenet__"
    infer_net = paddle.jit.load(model_path, config=config)

    # inference
    x = paddle.randn([4, 8], 'float32')
    pred = infer_net(x)

Attributes
----------
...

**Example code:**

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    class SimpleNet(nn.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = nn.Linear(in_size, out_size)

        @paddle.jit.to_static
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            loss = paddle.tensor.mean(z)
            return z, loss

    # enable dygraph mode
    paddle.disable_static()

    # train model
    net = SimpleNet(8, 8)
    adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
    x = paddle.randn([4, 8], 'float32')
    for i in range(10):
        out, loss = net(x)
        loss.backward()
        adam.step()
        adam.clear_grad()

    # use SaveLoadConfig.output_spec
    model_path = "simplenet.example.model.output_spec"
    config = paddle.SaveLoadConfig()
    # keep only the prediction output in the saved model and discard the loss
    config.output_spec = [out]
    paddle.jit.save(
        layer=net,
        model_path=model_path,
        config=config)

    infer_net = paddle.jit.load(model_path)
    x = paddle.randn([4, 8], 'float32')
    # only the prediction output is returned
    pred = infer_net(x)

.. py:attribute:: model_filename

The name of the file that stores the translated ``Program`` describing the :ref:`cn_api_fluid_dygraph_Layer` model structure. The default file name is ``__model__``.
...

**Example code**

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    class SimpleNet(nn.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = nn.Linear(in_size, out_size)

        @paddle.jit.to_static
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    paddle.disable_static()

    # train model
    net = SimpleNet(8, 8)
    adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
    x = paddle.randn([4, 8], 'float32')
    for i in range(10):
        out = net(x)
        loss = paddle.tensor.mean(out)
        loss.backward()
        adam.step()
        adam.clear_grad()

    # saving with config.model_filename
    model_path = "simplenet.example.model.model_filename"
    config = paddle.SaveLoadConfig()
    config.model_filename = "__simplenet__"
    paddle.jit.save(
        layer=net,
        model_path=model_path,
        config=config)
    # [result] the saved model directory contains:
    # __simplenet__  __variables__  __variables.info__

    # loading with config.model_filename
    infer_net = paddle.jit.load(model_path, config=config)
    x = paddle.randn([4, 8], 'float32')
    pred = infer_net(x)
...

**Example code**

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    class SimpleNet(nn.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = nn.Linear(in_size, out_size)

        @paddle.jit.to_static
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    paddle.disable_static()

    # train model
    net = SimpleNet(8, 8)
    adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
    x = paddle.randn([4, 8], 'float32')
    for i in range(10):
        out = net(x)
        loss = paddle.tensor.mean(out)
        loss.backward()
        adam.step()
        adam.clear_grad()

    model_path = "simplenet.example.model.params_filename"
    config = paddle.SaveLoadConfig()
    config.params_filename = "__params__"

    # saving with config.params_filename
    paddle.jit.save(
        layer=net,
        model_path=model_path,
        config=config)
    # [result] the saved model directory contains:
    # __model__  __params__  __variables.info__

    # loading with config.params_filename
    infer_net = paddle.jit.load(model_path, config=config)
    x = paddle.randn([4, 8], 'float32')
    pred = infer_net(x)
...

**Example code**

.. code-block:: python

    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    class SimpleNet(nn.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = nn.Linear(in_size, out_size)

        @paddle.jit.to_static
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    paddle.disable_static()

    # train model
    net = SimpleNet(8, 8)
    adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
    x = paddle.randn([4, 8], 'float32')
    for i in range(10):
        out = net(x)
        loss = paddle.tensor.mean(out)
        loss.backward()
        adam.step()
        adam.clear_grad()

    model_path = "simplenet.example.model.separate_params"
    config = paddle.jit.SaveLoadConfig()
    config.separate_params = True

    # saving with config.separate_params
    paddle.jit.save(
        layer=net,
        model_path=model_path,
        config=config)
    # [result] the saved model directory contains:
    # linear_0.b_0  linear_0.w_0  __model__  __variables.info__

    # loading with config.separate_params
    infer_net = paddle.jit.load(model_path, config=config)
    x = paddle.randn([4, 8], 'float32')
    pred = infer_net(x)
load
-----------------

.. py:function:: paddle.jit.load(model_path, config=None)

:api_attr: imperative programming mode (dynamic graph)

...

Parameters:
    - **model_path** (str) – The directory where the model is stored.
    - **config** (SaveLoadConfig, optional) – A :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` object that specifies additional configuration options. Defaults to ``None``.

Returns: TranslatedLayer – a ``Layer`` object that can execute the stored model.
...

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    def train(layer, loader, loss_fn, opt):
        for epoch_id in range(EPOCH_NUM):
            for batch_id, (image, label) in enumerate(loader()):
                out = layer(image)
                loss = loss_fn(out, label)
                loss.backward()
                opt.step()
                opt.clear_grad()
                print("Epoch {} batch {}: loss = {}".format(
                    epoch_id, batch_id, np.mean(loss.numpy())))

    # enable dygraph mode
    place = paddle.CPUPlace()
    paddle.disable_static(place)

    # 1. train & save model.

    # create network
    layer = LinearNet()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # train
    train(layer, loader, loss_fn, adam)

    # save
    model_path = "linear.example.model"
    paddle.jit.save(layer, model_path)

    # 2. load model

    # load
    loaded_layer = paddle.jit.load(model_path)

    # inference
    loaded_layer.eval()
    x = paddle.randn([1, IMAGE_SIZE], 'float32')
    pred = loaded_layer(x)

    # fine-tune
    loaded_layer.train()
    adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
    train(loaded_layer, loader, loss_fn, adam)
2. Load a model stored by the :ref:`cn_api_fluid_io_save_inference_model` interface for inference and fine-tune training.
...

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    image = fluid.data(name='image', shape=[None, 784], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    pred = fluid.layers.fc(input=image, size=10, act='softmax')
    loss = fluid.layers.cross_entropy(input=pred, label=label)
    avg_loss = fluid.layers.mean(loss)

    optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    optimizer.minimize(avg_loss)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        feed_list=[image, label],
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # 1. train and save inference model
    for data in loader():
        exe.run(
            fluid.default_main_program(),
            feed=data,
            fetch_list=[avg_loss])

    model_path = "fc.example.model"
    fluid.io.save_inference_model(
        model_path, ["image"], [pred], exe)

    # 2. load model

    # enable dygraph mode
    paddle.disable_static(place)

    # load
    fc = paddle.jit.load(model_path)

    # inference
    fc.eval()
    x = paddle.randn([1, IMAGE_SIZE], 'float32')
    pred = fc(x)

    # fine-tune
    fc.train()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)
    for epoch_id in range(EPOCH_NUM):
        for batch_id, (image, label) in enumerate(loader()):
            out = fc(image)
            loss = loss_fn(out, label)
            loss.backward()
            adam.step()
            adam.clear_grad()
            print("Epoch {} batch {}: loss = {}".format(
                epoch_id, batch_id, np.mean(loss.numpy())))
save
-----------------

.. py:function:: paddle.jit.save(layer, model_path, input_spec=None, config=None)

Saves the input :ref:`cn_api_fluid_dygraph_Layer`, decorated with ``@declarative``, as a model in the :ref:`cn_api_fluid_dygraph_TranslatedLayer` format, which can then be loaded for inference or fine-tune training.

...

Parameters:
    - **layer** (Layer) – The :ref:`cn_api_fluid_dygraph_Layer` object to be saved. The input ``Layer`` must be decorated with ``@declarative``.
    - **model_path** (str) – The directory in which to save the model.
    - **input_spec** (list[Variable], optional) – Describes the inputs of the saved model. This argument is an example input passed to the ``forward`` method of the saved ``TranslatedLayer``. If ``None``, all input variables of the original ``Layer``'s ``forward`` method are configured as inputs of the saved model. Defaults to ``None``.
    - **config** (SaveLoadConfig, optional) – A :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` object that specifies additional configuration options. Defaults to ``None``.

Returns: None
...

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    def train(layer, loader, loss_fn, opt):
        for epoch_id in range(EPOCH_NUM):
            for batch_id, (image, label) in enumerate(loader()):
                out = layer(image)
                loss = loss_fn(out, label)
                loss.backward()
                opt.step()
                opt.clear_grad()
                print("Epoch {} batch {}: loss = {}".format(
                    epoch_id, batch_id, np.mean(loss.numpy())))

    # enable dygraph mode
    place = paddle.CPUPlace()
    paddle.disable_static(place)

    # 1. train & save model.

    # create network
    layer = LinearNet()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # train
    train(layer, loader, loss_fn, adam)

    # save
    model_path = "linear.example.model"
    paddle.jit.save(layer, model_path)
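
The optional ``input_spec`` argument described above lets you pass explicit example inputs instead of relying on all inputs received by the decorated ``forward``. A minimal, self-contained sketch (the layer, sizes, and directory name are illustrative, not part of the original example):

.. code-block:: python

    import paddle
    import paddle.nn as nn

    # enable dygraph mode
    paddle.disable_static()

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    layer = LinearNet()

    # run the decorated forward once with an example input so that the
    # program to be saved has been traced
    example_input = paddle.randn([1, IMAGE_SIZE], 'float32')
    layer(example_input)

    # pass the example input explicitly; it becomes the saved model's input
    paddle.jit.save(
        layer=layer,
        model_path="linear.example.model.input_spec",
        input_spec=[example_input])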