Unverified commit 768dab44 authored by Chen Weihang, committed by GitHub

polish two api doc detail, test=document_fix (#28971)

Parent 7c7cdf08
@@ -1714,8 +1714,7 @@ All parameter, weight, gradient are variables in Paddle.
 m.def("init_gflags", framework::InitGflags);
 m.def("init_glog", framework::InitGLOG);
 m.def("load_op_library", framework::LoadOpLib);
-m.def("init_devices",
-      []() { framework::InitDevices(); });
+m.def("init_devices", []() { framework::InitDevices(); });
 m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
 m.def("is_compiled_with_xpu", IsCompiledWithXPU);
@@ -2280,7 +2279,7 @@ All parameter, weight, gradient are variables in Paddle.
       "configured again."));
   self.gradient_scale_ = strategy;
 },
-R"DOC((fluid.BuildStrategy.GradientScaleStrategy, optional): there are three
+R"DOC((paddle.static.BuildStrategy.GradientScaleStrategy, optional): there are three
 ways of defining :math:`loss@grad` in ParallelExecutor, that is, CoeffNumDevice,
 One and Customized. By default, ParallelExecutor sets the :math:`loss@grad`
 according to the number of devices. If you want to customize :math:`loss@grad`,
......
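For context on the docstring reference updated above: BuildStrategy.gradient_scale_ selects how :math:`loss@grad` is defined when a program runs data-parallel, choosing between CoeffNumDevice (the default, scale by device count), One, and Customized. A minimal sketch of setting it through the public paddle.static API, assuming the Paddle 2.0 static-graph workflow; the surrounding training setup is omitted and illustrative:

    import paddle

    paddle.enable_static()

    # Pick how loss@grad is scaled for multi-device execution:
    # CoeffNumDevice (default), One, or Customized.
    build_strategy = paddle.static.BuildStrategy()
    build_strategy.gradient_scale_ = (
        paddle.static.BuildStrategy.GradientScaleStrategy.One)

    # The strategy is then handed to a compiled/parallel program before
    # it is run by an Executor; that part is not shown here.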
@@ -566,12 +566,12 @@ def _construct_params_and_buffers(model_path,
 class TranslatedLayer(layers.Layer):
     """
-    TranslatedLayer is a imperative Layer for holding the model loaded by
-    :ref:`api_imperative_jit_load` . It can be used like a general Layer
-    object in eval or train mode.
+    TranslatedLayer is a ``paddle.nn.Layer`` for holding the model
+    loaded by :ref:`api_paddle_jit_load` . It can be used like a
+    general Layer object in eval or train mode.
     .. note:
-        The TranslatedLayer objects should not be created by constructor, it only can be loaded and constructed by :ref:`api_imperative_jit_load` .
+        The TranslatedLayer objects should not be created by constructor, it only can be loaded and constructed by :ref:`api_paddle_jit_load` .
     Examples:
         .. code-block:: python
@@ -621,10 +621,6 @@ class TranslatedLayer(layers.Layer):
 print("Epoch {} batch {}: loss = {}".format(
     epoch_id, batch_id, np.mean(loss.numpy())))
-# enable dygraph mode
-place = paddle.CPUPlace()
-paddle.disable_static(place)
 # 1. train & save model.
 # create network
@@ -635,7 +631,6 @@ class TranslatedLayer(layers.Layer):
 # create data loader
 dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
 loader = paddle.io.DataLoader(dataset,
-    places=place,
     batch_size=BATCH_SIZE,
     shuffle=True,
     drop_last=True,
@@ -896,10 +891,6 @@ class TranslatedLayer(layers.Layer):
 print("Epoch {} batch {}: loss = {}".format(
     epoch_id, batch_id, np.mean(loss.numpy())))
-# enable dygraph mode
-place = paddle.CPUPlace()
-paddle.disable_static(place)
 # create network
 layer = LinearNet()
 loss_fn = nn.CrossEntropyLoss()
@@ -908,7 +899,6 @@ class TranslatedLayer(layers.Layer):
 # create data loader
 dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
 loader = paddle.io.DataLoader(dataset,
-    places=place,
     batch_size=BATCH_SIZE,
     shuffle=True,
     drop_last=True,
......
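For context on the docstring edits above: TranslatedLayer is the layer type returned by paddle.jit.load, and the removed example lines reflect Paddle 2.0 defaults, where dygraph mode is already enabled and paddle.io.DataLoader no longer needs a places argument. A minimal sketch of the load-and-infer flow the example documents; the model path and input shape are illustrative placeholders and assume a model previously saved with paddle.jit.save:

    import numpy as np
    import paddle

    # Hypothetical path of a model saved earlier with paddle.jit.save.
    model_path = "example.model/linear"

    # paddle.jit.load returns a TranslatedLayer; it is never constructed
    # directly.
    layer = paddle.jit.load(model_path)

    # Use it like any paddle.nn.Layer: eval mode for inference...
    layer.eval()
    x = paddle.to_tensor(np.random.random((4, 784)).astype("float32"))
    pred = layer(x)

    # ...or train mode for fine-tuning.
    layer.train()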