From 768dab441ec4fbb566c88860bfa9f8da10dea03a Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Mon, 23 Nov 2020 19:58:41 +0800
Subject: [PATCH] polish two api doc detail, test=document_fix (#28971)

---
 paddle/fluid/pybind/pybind.cc     |  5 ++---
 python/paddle/fluid/dygraph/io.py | 18 ++++--------------
 2 files changed, 6 insertions(+), 17 deletions(-)

diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 879748c7db..b2d1cac37e 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1714,8 +1714,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("init_gflags", framework::InitGflags);
   m.def("init_glog", framework::InitGLOG);
   m.def("load_op_library", framework::LoadOpLib);
-  m.def("init_devices",
-        []() { framework::InitDevices(); });
+  m.def("init_devices", []() { framework::InitDevices(); });
 
   m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
   m.def("is_compiled_with_xpu", IsCompiledWithXPU);
@@ -2280,7 +2279,7 @@ All parameter, weight, gradient are variables in Paddle.
                                   "configured again."));
             self.gradient_scale_ = strategy;
           },
-          R"DOC((fluid.BuildStrategy.GradientScaleStrategy, optional): there are three
+          R"DOC((paddle.static.BuildStrategy.GradientScaleStrategy, optional): there are three
                 ways of defining :math:`loss@grad` in ParallelExecutor, that is, CoeffNumDevice,
                 One and Customized. By default, ParallelExecutor sets the :math:`loss@grad`
                 according to the number of devices. If you want to customize :math:`loss@grad`,
diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py
index c84e855d17..8797bbcf92 100644
--- a/python/paddle/fluid/dygraph/io.py
+++ b/python/paddle/fluid/dygraph/io.py
@@ -566,12 +566,12 @@ def _construct_params_and_buffers(model_path,
 
 class TranslatedLayer(layers.Layer):
     """
-    TranslatedLayer is a imperative Layer for holding the model loaded by
-    :ref:`api_imperative_jit_load` . It can be used like a general Layer
-    object in eval or train mode.
+    TranslatedLayer is a ``paddle.nn.Layer`` for holding the model
+    loaded by :ref:`api_paddle_jit_load` . It can be used like a
+    general Layer object in eval or train mode.
 
     .. note:
-        The TranslatedLayer objects should not be created by constructor, it only can be loaded and constructed by :ref:`api_imperative_jit_load` .
+        The TranslatedLayer objects should not be created by constructor, it only can be loaded and constructed by :ref:`api_paddle_jit_load` .
 
     Examples:
         .. code-block:: python
@@ -621,10 +621,6 @@ class TranslatedLayer(layers.Layer):
                     print("Epoch {} batch {}: loss = {}".format(
                         epoch_id, batch_id, np.mean(loss.numpy())))
 
-            # enable dygraph mode
-            place = paddle.CPUPlace()
-            paddle.disable_static(place)
-
             # 1. train & save model.
 
             # create network
@@ -635,7 +631,6 @@ class TranslatedLayer(layers.Layer):
             # create data loader
             dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
             loader = paddle.io.DataLoader(dataset,
-                places=place,
                 batch_size=BATCH_SIZE,
                 shuffle=True,
                 drop_last=True,
@@ -896,10 +891,6 @@ class TranslatedLayer(layers.Layer):
                     print("Epoch {} batch {}: loss = {}".format(
                         epoch_id, batch_id, np.mean(loss.numpy())))
 
-            # enable dygraph mode
-            place = paddle.CPUPlace()
-            paddle.disable_static(place)
-
             # create network
             layer = LinearNet()
             loss_fn = nn.CrossEntropyLoss()
@@ -908,7 +899,6 @@ class TranslatedLayer(layers.Layer):
             # create data loader
             dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
             loader = paddle.io.DataLoader(dataset,
-                places=place,
                 batch_size=BATCH_SIZE,
                 shuffle=True,
                 drop_last=True,
--
GitLab
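
For context on the first doc fix: the docstring now names the public
``paddle.static.BuildStrategy.GradientScaleStrategy`` path instead of the
legacy ``fluid`` one. A minimal sketch of how that strategy is typically
selected, assuming the Paddle 2.0-era static-graph API (the network and
loss below are hypothetical, not part of this patch):

.. code-block:: python

    import paddle

    paddle.enable_static()

    # hypothetical network: a single fc layer and a mean loss
    x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
    y = paddle.static.nn.fc(x, size=1)
    loss = paddle.mean(y)

    # choose one of the three documented strategies:
    # CoeffNumDevice (the default), One, or Customized
    build_strategy = paddle.static.BuildStrategy()
    build_strategy.gradient_scale_strategy = \
        paddle.static.BuildStrategy.GradientScaleStrategy.One

    # hand the strategy to a data-parallel compiled program
    program = paddle.static.CompiledProgram(
        paddle.static.default_main_program()).with_data_parallel(
            loss_name=loss.name, build_strategy=build_strategy)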
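
Likewise, for the second doc fix: the updated TranslatedLayer docstring drops
the explicit place/``disable_static`` boilerplate, since dynamic graph mode is
the default in Paddle 2.0. A minimal sketch of the load-and-infer pattern the
docstring describes, assuming a model was previously saved with
``paddle.jit.save`` (the path and input shape below are hypothetical):

.. code-block:: python

    import paddle

    # load the saved model as a TranslatedLayer
    layer = paddle.jit.load("example.model/linear")

    # the loaded layer can be used like any paddle.nn.Layer
    layer.eval()
    x = paddle.randn([1, 784], dtype='float32')
    pred = layer(x)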