Unverified commit 6233e0bc, authored by Chen Long, committed by GitHub

fix_some_docs test=develop (#2552)

Parent: feecb413
.. _cn_api_fluid_dygraph_load_dygraph:

load
----

.. py:function:: paddle.load(model_path, configs=None)

:api_attr: imperative programming mode (dynamic graph)

.. note::
    This API attempts to load a parameter ``dict`` or an optimizer ``dict`` from disk. For historical reasons, if the ``state_dict`` is loaded from the result of ``paddle.io.save_inference_model``, the structured variable names of dygraph parameters cannot be recovered, and when setting the loaded ``state_dict`` into the current Layer, ``Layer.set_state_dict`` must be called with ``use_structured_name=False``.

This API loads the contents of both ``model_path + ".pdparams"`` and ``model_path + ".pdopt"``.

Parameters:
    - **model_path** (str) – file prefix under which the state_dict was saved. The path must not include the ``.pdparams`` or ``.pdopt`` suffix.
    - **configs** (SaveLoadConfig, optional) – a :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` object specifying additional configuration options, mainly for compatibility with models stored by ``paddle.io.save_inference_model``. Default: ``None``.

Returns: two ``dict`` s, namely the model parameter ``dict`` and the optimizer parameter ``dict`` recovered from the files; if only one of the two storage files is found, the other entry is returned as None

    - param_dict: model parameter ``dict`` recovered from the file
    - opt_dict: optimizer parameter ``dict`` recovered from the file

Return type: tuple(dict, dict)
@@ -29,18 +29,24 @@ load_dygraph
.. code-block:: python

    import paddle

    paddle.disable_static()

    emb = paddle.nn.Embedding([10, 10])

    state_dict = emb.state_dict()
    paddle.save(state_dict, "paddle_dy")

    scheduler = paddle.optimizer.lr_scheduler.NoamLR(
        d_model=0.01, warmup_steps=100, verbose=True)
    adam = paddle.optimizer.Adam(
        learning_rate=scheduler,
        parameters=emb.parameters())
    state_dict = adam.state_dict()
    paddle.save(state_dict, "paddle_dy")

    para_state_dict, opti_state_dict = paddle.load("paddle_dy")
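For the ``paddle.io.save_inference_model`` compatibility path described in the note above, a minimal sketch follows; the directory name, stored file name, and layer are illustrative placeholders, and the ``SaveLoadConfig`` attributes used are the ones documented later on this page:

.. code-block:: python

    import paddle

    paddle.disable_static()

    # illustrative: a directory previously produced by paddle.io.save_inference_model
    model_path = "inference.example.model"

    configs = paddle.SaveLoadConfig()
    configs.model_filename = "__simplenet__"  # assumed file name used at save time

    # load the parameters stored in save_inference_model format
    para_state_dict, _ = paddle.load(model_path, configs)

    # structured variable names cannot be recovered from this format, so
    # structured-name matching must be disabled when setting the state_dict
    layer = paddle.nn.Linear(8, 8)  # a layer with matching parameters (assumed)
    layer.set_state_dict(para_state_dict, use_structured_name=False)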
@@ -15,70 +15,180 @@ TranslatedLayer
.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    def train(layer, loader, loss_fn, opt):
        for epoch_id in range(EPOCH_NUM):
            for batch_id, (image, label) in enumerate(loader()):
                out = layer(image)
                loss = loss_fn(out, label)
                loss.backward()
                opt.step()
                opt.clear_grad()
                print("Epoch {} batch {}: loss = {}".format(
                    epoch_id, batch_id, np.mean(loss.numpy())))

    # enable dygraph mode
    place = paddle.CPUPlace()
    paddle.disable_static(place)

    # 1. train & save model.

    # create network
    layer = LinearNet()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # train
    train(layer, loader, loss_fn, adam)

    # save
    model_path = "linear.example.model"
    paddle.jit.save(layer, model_path)

    # 2. load model as TranslatedLayer

    # load
    translated_layer = paddle.jit.load(model_path)

    # inference
    translated_layer.eval()
    x = paddle.randn([1, IMAGE_SIZE], 'float32')
    pred = translated_layer(x)

    # fine-tune
    translated_layer.train()
    adam = opt.Adam(learning_rate=0.001, parameters=translated_layer.parameters())
    train(translated_layer, loader, loss_fn, adam)

.. py:method:: program(method_name='forward')

Gets the Program corresponding to the specified method of the TranslatedLayer.

Parameters:
    - **method_name** (string) - name of the method whose Program is to be retrieved. Default: "forward".

Returns: Program

Return type: Program
**Example:**

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    def train(layer, loader, loss_fn, opt):
        for epoch_id in range(EPOCH_NUM):
            for batch_id, (image, label) in enumerate(loader()):
                out = layer(image)
                loss = loss_fn(out, label)
                loss.backward()
                opt.step()
                opt.clear_grad()
                print("Epoch {} batch {}: loss = {}".format(
                    epoch_id, batch_id, np.mean(loss.numpy())))

    # enable dygraph mode
    place = paddle.CPUPlace()
    paddle.disable_static(place)

    # create network
    layer = LinearNet()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # train
    train(layer, loader, loss_fn, adam)

    # save
    model_path = "linear.example.model"
    paddle.jit.save(layer, model_path)

    # load
    translated_layer = paddle.jit.load(model_path)

    # get program
    program = translated_layer.program()
    print(program)
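``program`` also accepts an explicit method name. For the layer above, which only declares ``forward``, the following sketch is equivalent to the call with the default argument:

.. code-block:: python

    # equivalent: explicitly name the method whose Program is wanted
    forward_program = translated_layer.program(method_name='forward')
    print(forward_program)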
@@ -391,13 +391,14 @@ a buffer is a non-parameter variable, not updated by the optimizer, but used in evaluation
    state_dict = emb.state_dict()
    fluid.save_dygraph(state_dict, "paddle_dy")
.. py:method:: set_state_dict(state_dict, include_sublayers=True, use_structured_name=True)

Sets the parameters and persistable buffers from the given ``state_dict``. All parameters and buffers are set from the ``Tensor`` values in ``state_dict``.

Parameters:
    - **state_dict** (dict) - dict containing all the parameters and persistable buffers
    - **include_sublayers** (bool, optional) - whether to also include the parameters and buffers of sublayers. Default: True
    - **use_structured_name** (bool, optional) - if True, the Layer's structured names are used as the dict keys; otherwise the variable names of the Parameters or Buffers are used as keys. Default: True

Returns: None
@@ -405,36 +406,16 @@ a buffer is a non-parameter variable, not updated by the optimizer, but used in evaluation
.. code-block:: python

    import paddle

    paddle.disable_static()

    emb = paddle.nn.Embedding([10, 10])

    state_dict = emb.state_dict()
    paddle.save(state_dict, "paddle_dy")

    para_state_dict, _ = paddle.load("paddle_dy")

    emb.set_state_dict(para_state_dict)
.. _cn_api_fluid_dygraph_jit_SaveLoadConfig:

SaveLoadConfig
--------------

.. py:class:: paddle.SaveLoadConfig()

Configures additional options for the APIs :ref:`cn_api_fluid_dygraph_jit_save` and :ref:`cn_api_fluid_dygraph_jit_load` when saving and loading a :ref:`cn_api_fluid_dygraph_TranslatedLayer`.

**Examples:**

1. Using ``SaveLoadConfig`` when saving a model

.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Linear
    from paddle.fluid.dygraph import declarative

    class SimpleNet(fluid.dygraph.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = Linear(in_size, out_size)

        @declarative
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    fluid.enable_dygraph()

    # train the model
    net = SimpleNet(8, 8)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    for i in range(10):
        out = net(x)
        loss = fluid.layers.mean(out)
        loss.backward()
        adam.minimize(loss)
        net.clear_gradients()

    # use SaveLoadConfig when saving the model
    model_path = "simplenet.example.model"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.model_filename = "__simplenet__"
    fluid.dygraph.jit.save(
        layer=net,
        model_path=model_path,
        input_spec=[x],
        configs=configs)
2. Using ``SaveLoadConfig`` when loading a model

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # enable dygraph mode
    fluid.enable_dygraph()

    # use SaveLoadConfig when loading the model
    model_path = "simplenet.example.model"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.model_filename = "__simplenet__"
    infer_net = fluid.dygraph.jit.load(model_path, configs=configs)

    # inference
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    pred = infer_net(x)
Attributes
::::::::::::

.. py:attribute:: output_spec

Selects the output variables of the saved model ( :ref:`cn_api_fluid_dygraph_TranslatedLayer` ); with these variables specified, the model computes only the designated results.

By default, all return variables of the original :ref:`cn_api_fluid_dygraph_Layer` 's forward method are configured as output variables of the saved :ref:`cn_api_fluid_dygraph_TranslatedLayer` model.

The ``output_spec`` attribute must be of type ``list[Variable]``. If the given ``output_spec`` list does not contain all the return variables of the original :ref:`cn_api_fluid_dygraph_Layer` 's forward method, the saved model is pruned according to the given ``output_spec`` list.

.. note::
    The ``output_spec`` attribute is only used when saving a model.

**Example:**

.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Linear
    from paddle.fluid.dygraph import declarative

    class SimpleNet(fluid.dygraph.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = Linear(in_size, out_size)

        @declarative
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            loss = fluid.layers.mean(z)
            return z, loss

    # enable dygraph mode
    fluid.enable_dygraph()

    # train the model
    net = SimpleNet(8, 8)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    for i in range(10):
        out, loss = net(x)
        loss.backward()
        adam.minimize(loss)
        net.clear_gradients()

    # use SaveLoadConfig.output_spec
    model_path = "simplenet.example.model.output_spec"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    # keep only the prediction in the saved model and discard the loss
    configs.output_spec = [out]
    fluid.dygraph.jit.save(
        layer=net,
        model_path=model_path,
        input_spec=[x],
        configs=configs)

    infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    # only the prediction is returned
    pred = infer_net(x)
.. py:attribute:: model_filename

Name of the file storing the translated :ref:`cn_api_fluid_dygraph_Layer` model structure ``Program``. The default file name is ``__model__``.

**Example:**

.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Linear
    from paddle.fluid.dygraph import declarative

    class SimpleNet(fluid.dygraph.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = Linear(in_size, out_size)

        @declarative
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    fluid.enable_dygraph()

    # train the model
    net = SimpleNet(8, 8)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    for i in range(10):
        out = net(x)
        loss = fluid.layers.mean(out)
        loss.backward()
        adam.minimize(loss)
        net.clear_gradients()

    model_path = "simplenet.example.model.model_filename"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.model_filename = "__simplenet__"

    # save the model with configs.model_filename configured
    fluid.dygraph.jit.save(
        layer=net,
        model_path=model_path,
        input_spec=[x],
        configs=configs)
    # [result] the saved model directory contains:
    # __simplenet__  __variables__  __variables.info__

    # load the model with configs.model_filename configured
    infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    pred = infer_net(x)
.. py:attribute:: params_filename

Name of the file storing all persistable parameters (both ``Parameters`` and persistable ``Buffers``) of the translated :ref:`cn_api_fluid_dygraph_Layer`. The default file name is ``__variables__``.

**Example:**

.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Linear
    from paddle.fluid.dygraph import declarative

    class SimpleNet(fluid.dygraph.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = Linear(in_size, out_size)

        @declarative
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    fluid.enable_dygraph()

    # train the model
    net = SimpleNet(8, 8)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    for i in range(10):
        out = net(x)
        loss = fluid.layers.mean(out)
        loss.backward()
        adam.minimize(loss)
        net.clear_gradients()

    model_path = "simplenet.example.model.params_filename"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.params_filename = "__params__"

    # save the model with configs.params_filename configured
    fluid.dygraph.jit.save(
        layer=net,
        model_path=model_path,
        input_spec=[x],
        configs=configs)
    # [result] the saved model directory contains:
    # __model__  __params__  __variables.info__

    # load the model with configs.params_filename configured
    infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    pred = infer_net(x)
.. py:attribute:: separate_params

Configures whether to save the parameters of the :ref:`cn_api_fluid_dygraph_Layer` as separate files (for compatibility with the behavior of :ref:`cn_api_fluid_io_save_inference_model`).

If set to ``True``, each parameter is saved to its own file, named after the parameter, and the file name given by ``SaveLoadConfig.params_filename`` no longer takes effect. Default: ``False``.

**Example:**

.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Linear
    from paddle.fluid.dygraph import declarative

    class SimpleNet(fluid.dygraph.Layer):
        def __init__(self, in_size, out_size):
            super(SimpleNet, self).__init__()
            self._linear = Linear(in_size, out_size)

        @declarative
        def forward(self, x):
            y = self._linear(x)
            z = self._linear(y)
            return z

    # enable dygraph mode
    fluid.enable_dygraph()

    # train the model
    net = SimpleNet(8, 8)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    for i in range(10):
        out = net(x)
        loss = fluid.layers.mean(out)
        loss.backward()
        adam.minimize(loss)
        net.clear_gradients()

    model_path = "simplenet.example.model.separate_params"
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.separate_params = True

    # save the model with configs.separate_params configured
    fluid.dygraph.jit.save(
        layer=net,
        model_path=model_path,
        input_spec=[x],
        configs=configs)
    # [result] the saved model directory contains:
    # linear_0.b_0  linear_0.w_0  __model__  __variables.info__

    # load the model with the same configs
    infer_net = fluid.dygraph.jit.load(model_path, configs=configs)
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    pred = infer_net(x)
.. py:attribute:: keep_name_table

Configures whether the result of ``paddle.load`` keeps the table mapping each ``structured_name`` to the actual parameter variable name. This mapping table is stored when calling ``paddle.save`` and is generally used only for debugging; removing it does not affect real training or inference. By default it is not kept in the result of ``paddle.load``. Default: False.

.. note::
    This option is only used by the ``paddle.load`` method.

**Example:**

.. code-block:: python
    import paddle

    paddle.disable_static()

    linear = paddle.nn.Linear(5, 1)

    state_dict = linear.state_dict()
    paddle.save(state_dict, "paddle_dy")

    configs = paddle.SaveLoadConfig()
    configs.keep_name_table = True
    para_state_dict, _ = paddle.load("paddle_dy", configs)

    print(para_state_dict)
    # the name table is stored under the key 'StructuredToParameterName@@'
    # {'bias': array([0.], dtype=float32),
    #  'StructuredToParameterName@@':
    #     {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'},
    #  'weight': array([[ 0.04230034],
    #     [-0.1222527 ],
    #     [ 0.7392676 ],
    #     [-0.8136974 ],
    #     [ 0.01211023]], dtype=float32)}
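If such a loaded ``state_dict`` is then set back into a Layer, the debug mapping can be dropped first. A minimal sketch continuing the example above (the key name is taken from the printed output):

.. code-block:: python

    # drop the debug name table before handing the dict to set_state_dict
    name_table = para_state_dict.pop('StructuredToParameterName@@', None)
    linear.set_state_dict(para_state_dict)
    print(name_table)
    # {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'}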