diff --git a/python/paddle/incubate/autotune.py b/python/paddle/incubate/autotune.py
index dfad1dc58c92819f8522035e146ca5a677461084..a4b9cbe7006bdaed00863a63961f4e0b815d9b2e 100644
--- a/python/paddle/incubate/autotune.py
+++ b/python/paddle/incubate/autotune.py
@@ -56,29 +56,29 @@ def set_config(config=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import json
-
-            # config is a dict.
-            config = {
-                "kernel": {
-                    "enable": True,
-                    "tuning_range": [1, 5],
-                },
-                "layout": {
-                    "enable": True,
-                },
-                "dataloader": {
-                    "enable": True,
-                }
-            }
-            paddle.incubate.autotune.set_config(config)
-
-            # config is the path of json file.
-            config_json = json.dumps(config)
-            with open('config.json', 'w') as json_file:
-                json_file.write(config_json)
-            paddle.incubate.autotune.set_config('config.json')
+            >>> import paddle
+            >>> import json
+
+            >>> # config is a dict.
+            >>> config = {
+            ...     "kernel": {
+            ...         "enable": True,
+            ...         "tuning_range": [1, 5],
+            ...     },
+            ...     "layout": {
+            ...         "enable": True,
+            ...     },
+            ...     "dataloader": {
+            ...         "enable": True,
+            ...     }
+            ... }
+            >>> paddle.incubate.autotune.set_config(config)
+
+            >>> # config is the path of a json file.
+            >>> config_json = json.dumps(config)
+            >>> with open('config.json', 'w') as json_file:
+            ...     json_file.write(config_json)
+            >>> paddle.incubate.autotune.set_config('config.json')
 
     """
     if config is None:
diff --git a/python/paddle/incubate/distributed/fleet/base.py b/python/paddle/incubate/distributed/fleet/base.py
index a9eda099f7211fb22d5930bad7ce2e6236953cdf..81d071bf98226eaf76e2fee3d11986cd90784ded 100644
--- a/python/paddle/incubate/distributed/fleet/base.py
+++ b/python/paddle/incubate/distributed/fleet/base.py
@@ -343,12 +343,13 @@ class DistributedOptimizer(metaclass=abc.ABCMeta):
         Examples:
             .. code-block:: python
 
-                loss = network()
-                optimizer = fluid.optimizer.SGD(learning_rate=0.1)
-                params_grads = optimizer.backward(loss)
-                # you may append operations for params_grads here
-                # ...
-                optimizer.apply_gradients(params_grads)
+                >>> # doctest: +SKIP('The network is not defined.')
+                >>> loss = network()
+                >>> optimizer = fluid.optimizer.SGD(learning_rate=0.1)
+                >>> params_grads = optimizer.backward(loss)
+                >>> # you may append operations for params_grads here
+                >>> # ...
+                >>> optimizer.apply_gradients(params_grads)
 
         """
         pass