未验证 提交 787a4d8e 编写于 作者: C Chen Weihang 提交者: GitHub

Update save/load configs & 2.0 examples (#2553)

* update configs & 2.0 examples

* move apis directory

* update api names
上级 256421b2
......@@ -4,7 +4,7 @@ load
----
.. py:function:: paddle.load(model_path, configs=None)
.. py:function:: paddle.load(model_path, config=None)
:api_attr: 命令式编程模式(动态图)
......@@ -15,7 +15,7 @@ load
参数:
- **model_path** (str) – 保存state_dict的文件前缀。该路径不应该包括后缀 ``.pdparams`` 或 ``.pdopt``。
- **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象,这些选项主要是用于兼容 ``paddle.io.save_inference_model`` 存储模型的格式。默认为 ``None``。
- **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象,这些选项主要是用于兼容 ``paddle.io.save_inference_model`` 存储模型的格式。默认为 ``None``。
返回: 两个 ``dict`` ,即从文件中恢复的模型参数 ``dict`` 和优化器参数 ``dict``,如果只找到其中一个的存储文件,另一个返回None
......
......@@ -13,56 +13,60 @@ SaveLoadConfig
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
class SimpleNet(fluid.dygraph.Layer):
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# 开启命令式编程模式
fluid.enable_dygraph()
# 训练模型
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = fluid.layers.mean(out)
loss = paddle.tensor.mean(out)
loss.backward()
adam.minimize(loss)
net.clear_gradients()
# 在存储模型时使用SaveLoadConfig
adam.step()
adam.clear_grad()
# use SaveLoadConfig when saving model
model_path = "simplenet.example.model"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.model_filename = "__simplenet__"
fluid.dygraph.jit.save(
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
paddle.jit.save(
layer=net,
model_path=model_path,
input_spec=[x],
configs=configs)
config=config)
2. 在载入模型时使用 ``SaveLoadConfig``
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
# 开启命令式编程模式
fluid.enable_dygraph()
# 在载入模型时使用SaveLoadconfig
import paddle
# enable dygraph mode
paddle.disable_static()
# use SaveLoadConfig when loading model
model_path = "simplenet.example.model"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.model_filename = "__simplenet__"
infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
# 预测
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
infer_net = paddle.jit.load(model_path, config=config)
# inference
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
属性
......@@ -82,47 +86,50 @@ SaveLoadConfig
**示例代码:**
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
class SimpleNet(fluid.dygraph.Layer):
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
loss = fluid.layers.mean(z)
loss = paddle.tensor.mean(z)
return z, loss
# 开启命令式编程模式
fluid.enable_dygraph()
# 训练模型
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out, loss = net(x)
loss.backward()
adam.minimize(loss)
net.clear_gradients()
# 使用SaveLoadconfig.output_spec
adam.step()
adam.clear_grad()
# use SaveLoadConfig.output_spec
model_path = "simplenet.example.model.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
# 仅在存储模型中保留预测结果,丢弃loss
configs.output_spec = [out]
fluid.dygraph.jit.save(
config = paddle.SaveLoadConfig()
config.output_spec = [out]
paddle.jit.save(
layer=net,
model_path=model_path,
input_spec=[x],
configs=configs)
infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
# 仅有预测结果输出
config=config)
infer_net = paddle.jit.load(model_path)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
.. py:attribute:: model_filename
存储转写 :ref:`cn_api_fluid_dygraph_Layer` 模型结构 ``Program`` 的文件名称。默认文件名为 ``__model__``。
......@@ -130,45 +137,47 @@ SaveLoadConfig
**示例代码**
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
class SimpleNet(fluid.dygraph.Layer):
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# 开启命令式编程模式
fluid.enable_dygraph()
# 训练模型
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = fluid.layers.mean(out)
loss = paddle.tensor.mean(out)
loss.backward()
adam.minimize(loss)
net.clear_gradients()
adam.step()
adam.clear_grad()
# saving with config.model_filename
model_path = "simplenet.example.model.model_filename"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.model_filename = "__simplenet__"
# 配置configs.model_filename存储模型
fluid.dygraph.jit.save(
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
paddle.jit.save(
layer=net,
model_path=model_path,
input_spec=[x],
configs=configs)
# [结果] 存储模型目录文件包括:
# __simplenet__ __variables__ __variables.info__
# 配置configs.model_filename载入模型
infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
config=config)
# loading with config.model_filename
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
......@@ -179,45 +188,48 @@ SaveLoadConfig
**示例代码**
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
class SimpleNet(fluid.dygraph.Layer):
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# 开启命令式编程模式
fluid.enable_dygraph()
# 训练模型
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = fluid.layers.mean(out)
loss = paddle.tensor.mean(out)
loss.backward()
adam.minimize(loss)
net.clear_gradients()
adam.step()
adam.clear_grad()
model_path = "simplenet.example.model.params_filename"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.params_filename = "__params__"
# 配置configs.params_filename存储模型
fluid.dygraph.jit.save(
config = paddle.SaveLoadConfig()
config.params_filename = "__params__"
# saving with config.params_filename
paddle.jit.save(
layer=net,
model_path=model_path,
input_spec=[x],
configs=configs)
# [结果] 存储模型目录文件包括:
# __model__ __params__ __variables.info__
# 配置configs.params_filename载入模型
infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
config=config)
# loading with config.params_filename
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
......@@ -231,45 +243,50 @@ SaveLoadConfig
**示例代码**
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
class SimpleNet(fluid.dygraph.Layer):
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# 开启命令式编程模式
fluid.enable_dygraph()
# 训练模型
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = fluid.layers.mean(out)
loss = paddle.tensor.mean(out)
loss.backward()
adam.minimize(loss)
net.clear_gradients()
adam.step()
adam.clear_grad()
model_path = "simplenet.example.model.separate_params"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.separate_params = True
# 配置configs.separate_params存储模型
fluid.dygraph.jit.save(
config = paddle.jit.SaveLoadConfig()
config.separate_params = True
# saving with config.separate_params
paddle.jit.save(
layer=net,
model_path=model_path,
input_spec=[x],
configs=configs)
# [结果] 存储模型目录文件包括:
config=config)
# [result] the saved model directory contains:
# linear_0.b_0 linear_0.w_0 __model__ __variables.info__
# 配置configs.params_filename载入模型
infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
# loading a model saved with config.separate_params
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
......
......@@ -3,7 +3,7 @@
load
-----------------
.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None)
.. py:function:: paddle.jit.load(model_path, config=None)
:api_attr: 命令式编程模式(动态图)
......@@ -19,7 +19,7 @@ load
参数:
- **model_path** (str) - 存储模型的目录。
- **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
- **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。
......@@ -30,77 +30,92 @@ load
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
BATCH_SIZE = 32
BATCH_NUM = 20
def random_batch_reader():
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
class LinearNet(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# 开启命令式编程模式
fluid.enable_dygraph()
# 1. 训练存储模型.
# 创建网络
net = LinearNet(784, 1)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
# 创建DataLoader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(random_batch_reader())
# 训练
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = net(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
net.clear_gradients()
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# enable dygraph mode
place = paddle.CPUPlace()
paddle.disable_static(place)
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
model_path = "linear.example.model"
fluid.dygraph.jit.save(
layer=net,
model_path=model_path,
input_spec=[img])
# 2. 载入模型 & 预测
# 载入模型
infer_net = fluid.dygraph.jit.load(model_path)
# 预测
x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
pred = infer_net(x)
# 3. 载入模型 & fine-tune训练
# 载入模型
train_net = fluid.dygraph.jit.load(model_path)
train_net.train()
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters())
# 创建DataLoader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(random_batch_reader())
# fine-tune训练
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = train_net(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
train_net.clear_gradients()
paddle.jit.save(layer, model_path)
# 2. load model
# load
loaded_layer = paddle.jit.load(model_path)
# inference
loaded_layer.eval()
x = paddle.randn([1, IMAGE_SIZE], 'float32')
pred = loaded_layer(x)
# fine-tune
loaded_layer.train()
adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
train(loaded_layer, loader, loss_fn, adam)
2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。
......@@ -108,61 +123,95 @@ load
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 32
BATCH_NUM = 20
def random_batch_reader():
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
img = fluid.data(name='img', shape=[None, 784], dtype='float32')
def __len__(self):
return self.num_samples
image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
pred = fluid.layers.fc(input=img, size=10, act='softmax')
pred = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
loader = fluid.io.DataLoader.from_generator(
feed_list=[img, label], capacity=5, iterable=True)
loader.set_batch_generator(random_batch_reader(), places=place)
# 1. 训练 & 存储预测模型
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
feed_list=[image, label],
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# 1. train and save inference model
for data in loader():
exe.run(
fluid.default_main_program(),
feed=data,
fetch_list=[avg_loss])
model_path = "fc.example.model"
fluid.io.save_inference_model(
model_path, ["img"], [pred], exe)
# 开启命令式编程模式
fluid.enable_dygraph()
# 2. 载入模型 & 预测
fc = fluid.dygraph.jit.load(model_path)
x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32'))
model_path, ["image"], [pred], exe)
# 2. load model
# enable dygraph mode
paddle.disable_static(place)
# load
fc = paddle.jit.load(model_path)
# inference
fc.eval()
x = paddle.randn([1, IMAGE_SIZE], 'float32')
pred = fc(x)
# 3. 载入模型 & fine-tune训练
fc = fluid.dygraph.jit.load(model_path)
# fine-tune
fc.train()
sgd = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=fc.parameters())
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(
random_batch_reader(), places=place)
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = fc(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
sgd.minimize(avg_loss)
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
loader = paddle.io.DataLoader(dataset,
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = fc(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
......@@ -3,7 +3,7 @@
save
-----------------
.. py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None)
.. py:function:: paddle.jit.save(layer, model_path, input_spec=None, config=None)
将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型,
载入后可用于预测推理或者fine-tune训练。
......@@ -22,7 +22,7 @@ save
- **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。
- **model_path** (str) - 存储模型的目录。
- **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。
- **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
- **config** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。
返回:无
......@@ -31,50 +31,74 @@ save
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative
BATCH_SIZE = 32
BATCH_NUM = 20
def random_batch_reader():
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
class LinearNet(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# 开启命令式编程模式
fluid.enable_dygraph()
# 创建网络
net = LinearNet(784, 1)
adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
# 创建DataLoader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(random_batch_reader())
# 训练
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = net(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
net.clear_gradients()
# 存储模型
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# enable dygraph mode
place = paddle.CPUPlace()
paddle.disable_static(place)
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
model_path = "linear.example.model"
fluid.dygraph.jit.save(
layer=net,
model_path=model_path,
input_spec=[img])
paddle.jit.save(layer, model_path)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册