Unverified · Commit fd6631ef · authored by lujun · committed by GitHub

Fix dygraph show style (#18297)

Fix dygraph show style for FluidDoc.
Parent: 9931bc64
@@ -728,8 +728,8 @@ paddle.fluid.dygraph.Tracer.train_mode (ArgSpec(args=['self'], varargs=None, key
paddle.fluid.dygraph.start_gperf_profiler (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.stop_gperf_profiler (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.prepare_context (ArgSpec(args=['strategy'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.save_persistables (ArgSpec(args=['model_dict', 'dirname', 'optimizers'], varargs=None, keywords=None, defaults=('save_dir', None)), ('document', 'd264d1d00dcf3c7e957978563369b57f'))
-paddle.fluid.dygraph.load_persistables (ArgSpec(args=['dirname'], varargs=None, keywords=None, defaults=('save_dir',)), ('document', '8a0f69b10754e45907b904aa68f3c5dc'))
+paddle.fluid.dygraph.save_persistables (ArgSpec(args=['model_dict', 'dirname', 'optimizers'], varargs=None, keywords=None, defaults=('save_dir', None)), ('document', '7f526f879139a14cda8e0b5a9171f264'))
+paddle.fluid.dygraph.load_persistables (ArgSpec(args=['dirname'], varargs=None, keywords=None, defaults=('save_dir',)), ('document', '2574d50a7a9f89fb0d74ddf73d8128f0'))
paddle.fluid.dygraph.NoamDecay.__init__ (ArgSpec(args=['self', 'd_model', 'warmup_steps', 'begin', 'step', 'dtype'], varargs=None, keywords=None, defaults=(1, 1, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NoamDecay.create_lr_var (ArgSpec(args=['self', 'lr'], varargs=None, keywords=None, defaults=None), ('document', '013bc233558149d0757b3df57845b866'))
paddle.fluid.dygraph.NoamDecay.step (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
......
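In this hunk only the second field of each (document, <hash>) tuple changes while the ArgSpec stays identical, i.e. the commit touched docstring text, not signatures. These hex strings appear to be MD5 digests of the docstrings; a minimal sketch of how such a fingerprint could be computed (the helper name and the fallback for missing docs are assumptions, not Paddle's actual spec tooling):

    import hashlib

    def doc_fingerprint(api):
        # Hash the docstring so that doc-only edits show up in an API.spec-style
        # diff even when the argument signature is unchanged.
        doc = api.__doc__ or "None"  # assumption: undocumented APIs hash a placeholder
        return hashlib.md5(doc.encode("utf-8")).hexdigest()

    # e.g. doc_fingerprint(fluid.dygraph.save_persistables) would reproduce the
    # new hash above, if this is indeed how the spec file is generated.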
@@ -163,21 +163,22 @@ void BindImperative(pybind11::module *m_ptr) {
        Examples:
            .. code-block:: python

                import numpy as np
                import paddle.fluid as fluid
                from paddle.fluid import FC

                x = np.ones([2, 2], np.float32)
                with fluid.dygraph.guard():
                    inputs2 = []
                    for _ in range(10):
                        inputs2.append(fluid.dygraph.base.to_variable(x))
                    ret2 = fluid.layers.sums(inputs2)
                    loss2 = fluid.layers.reduce_sum(ret2)
                    backward_strategy = fluid.dygraph.BackwardStrategy()
                    backward_strategy.sort_sum_gradient = True
                    loss2.backward(backward_strategy)
)DOC");
backward_strategy.def(py::init())
.def_property("sort_sum_gradient",
......
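For reference, the docstring example above (which appears to come from the BackwardStrategy binding in paddle/fluid/pybind/imperative.cc) is runnable as-is under the 1.5-era dygraph API; the FC import is unused and can be dropped. A minimal sketch:

    import numpy as np
    import paddle.fluid as fluid

    x = np.ones([2, 2], np.float32)
    with fluid.dygraph.guard():
        # build ten identical 2x2 tensors and sum them elementwise
        inputs2 = [fluid.dygraph.base.to_variable(x) for _ in range(10)]
        ret2 = fluid.layers.sums(inputs2)
        loss2 = fluid.layers.reduce_sum(ret2)
        # sort_sum_gradient makes the gradient accumulation order deterministic
        backward_strategy = fluid.dygraph.BackwardStrategy()
        backward_strategy.sort_sum_gradient = True
        loss2.backward(backward_strategy)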
@@ -43,35 +43,38 @@ def save_persistables(model_dict, dirname='save_dir', optimizers=None):
optimizers(fluid.Optimizer|list(fluid.Optimizer)|None): The optimizers to be saved
Returns:
None
    Examples:
        .. code-block:: python

            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            x_data = x_data.reshape((-1, num_steps, 1))
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)
            dy_loss.backward()
            sgd.minimize(dy_loss)
            ptb_model.clear_gradients()
            param_path = "./my_paddle_model"
            fluid.dygraph.save_persistables(ptb_model.state_dict(),
                                            dirname=param_path,
                                            optimizers=sgd)
"""
if isinstance(model_dict, collections.OrderedDict):
_save_var_to_file(model_dict, optimizers, dirname, None)
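Because PtbModel is not defined inside the docstring, a smaller self-contained sketch may be easier to follow; it assumes the 1.5-era fluid.dygraph.FC layer and Layer.state_dict(), and persists both parameters and optimizer state:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        fc = fluid.dygraph.FC("fc", 64)  # assumption: 1.5-era FC(name_scope, size)
        adam = fluid.optimizer.Adam(learning_rate=1e-3)
        out = fc(fluid.dygraph.to_variable(np.ones([4, 32], dtype='float32')))
        loss = fluid.layers.reduce_mean(out)
        loss.backward()
        adam.minimize(loss)
        # parameters plus optimizer state go under ./my_paddle_model
        fluid.dygraph.save_persistables(fc.state_dict(),
                                        dirname="./my_paddle_model",
                                        optimizers=adam)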
@@ -95,13 +98,15 @@ def load_persistables(dirname='save_dir'):
        optimizer dict: The optimizer state loaded from the checkpoint
    Examples:
        .. code-block:: python

            # MyLayer stands in for any fluid.dygraph.Layer subclass (hypothetical name)
            my_layer = MyLayer("my_layer")
            param_path = "./my_paddle_model"
            sgd = SGDOptimizer(learning_rate=1e-3)
            param_dict, optimizer_dict = fluid.dygraph.load_persistables(param_path)
            param_1 = param_dict['PtbModel_0.w_1']
            sgd.load(optimizer_dict)
"""
return _load_var_from_file(dirname)
......
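A matching load-side sketch under the same assumptions (Layer.load_dict and Optimizer.load are assumed to be the restore entry points in this release):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        fc = fluid.dygraph.FC("fc", 64)
        # one forward pass so the lazily built FC parameters exist before loading
        fc(fluid.dygraph.to_variable(np.ones([4, 32], dtype='float32')))
        # load_persistables returns the parameter dict and the optimizer state dict
        param_dict, optimizer_dict = fluid.dygraph.load_persistables("./my_paddle_model")
        fc.load_dict(param_dict)   # assumption: Layer.load_dict restores weights
        adam = fluid.optimizer.Adam(learning_rate=1e-3)
        adam.load(optimizer_dict)  # assumption: Optimizer.load restores state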