Unverified commit f7d1a940, authored by 张春乔, committed by GitHub

[xdoctest] reformat example code with google style in No. 246 and 247 (#56475)

Parent 17d6da6b
@@ -56,29 +56,29 @@ def set_config(config=None):
Examples:
.. code-block:: python
-   import paddle
-   import json
-   # config is a dict.
-   config = {
-       "kernel": {
-           "enable": True,
-           "tuning_range": [1, 5],
-       },
-       "layout": {
-           "enable": True,
-       },
-       "dataloader": {
-           "enable": True,
-       }
-   }
-   paddle.incubate.autotune.set_config(config)
-   # config is the path of json file.
-   config_json = json.dumps(config)
-   with open('config.json', 'w') as json_file:
-       json_file.write(config_json)
-   paddle.incubate.autotune.set_config('config.json')
+   >>> import paddle
+   >>> import json
+   >>> # config is a dict.
+   >>> config = {
+   ...     "kernel": {
+   ...         "enable": True,
+   ...         "tuning_range": [1, 5],
+   ...     },
+   ...     "layout": {
+   ...         "enable": True,
+   ...     },
+   ...     "dataloader": {
+   ...         "enable": True,
+   ...     }
+   ... }
+   >>> paddle.incubate.autotune.set_config(config)
+   >>> # config is the path of the json file.
+   >>> config_json = json.dumps(config)
+   >>> with open('config.json', 'w') as json_file:
+   ...     json_file.write(config_json)
+   >>> paddle.incubate.autotune.set_config('config.json')
"""
if config is None:
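Note on the Google-style examples above: every physical line that continues a multi-line statement must carry the `...` continuation prompt; a closing brace marked `>>>` would be parsed as a standalone statement (`}` alone) and raise a SyntaxError. A minimal sketch, using only the standard-library doctest module (not part of this patch), that verifies a correctly prompted block:

import doctest

# A correctly converted snippet: the dict literal continues with "...".
snippet = '''
>>> config = {
...     "kernel": {"enable": True},
... }
>>> sorted(config)
['kernel']
'''

parser = doctest.DocTestParser()
test = parser.get_doctest(snippet, globs={}, name="snippet", filename=None, lineno=0)
runner = doctest.DocTestRunner(verbose=False)
runner.run(test)
print(runner.summarize())  # TestResults(failed=0, attempted=2)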
@@ -343,12 +343,13 @@ class DistributedOptimizer(metaclass=abc.ABCMeta):
Examples:
.. code-block:: python
-   loss = network()
-   optimizer = fluid.optimizer.SGD(learning_rate=0.1)
-   params_grads = optimizer.backward(loss)
-   # you may append operations for params_grads here
-   # ...
-   optimizer.apply_gradients(params_grads)
+   >>> # doctest: +SKIP('The network is not defined.')
+   >>> loss = network()
+   >>> optimizer = fluid.optimizer.SGD(learning_rate=0.1)
+   >>> params_grads = optimizer.backward(loss)
+   >>> # you may append operations for params_grads here
+   >>> # ...
+   >>> optimizer.apply_gradients(params_grads)
"""
pass
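For completeness, a hedged sketch of how such converted examples can be checked in bulk with the xdoctest runner (assumptions: xdoctest is installed, and the module path below is illustrative rather than taken from this patch):

import xdoctest

# Collect every ">>> " example found in the module's docstrings;
# the 'all' command executes them rather than just listing them.
xdoctest.doctest_module('paddle.incubate.autotune', command='all')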