From ed9d603a8ac5ff3ef7a3eae473a9c8841a7e9bfb Mon Sep 17 00:00:00 2001
From: lujun
Date: Fri, 31 May 2019 10:02:39 +0800
Subject: [PATCH] fix api doc: Optimizer.ModelAverage (#17395)

---
 paddle/fluid/API.spec            |  4 +--
 python/paddle/dataset/mnist.py   |  4 +--
 python/paddle/fluid/optimizer.py | 50 +++++++++++++++++++++++++------
 3 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 219274acf..49b7b50cf 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -500,13 +500,13 @@ paddle.fluid.optimizer.AdadeltaOptimizer.backward (ArgSpec(args=['self', 'loss',
 paddle.fluid.optimizer.AdadeltaOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.AdadeltaOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'b15cffad0903fc81af77a0580ceb2a9b'))
 paddle.fluid.optimizer.ModelAverage.__init__ (ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.optimizer.ModelAverage.apply (ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,)), ('document', '46234a5470590feb336346f70a3db715'))
+paddle.fluid.optimizer.ModelAverage.apply (ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,)), ('document', '648010d0ac1fa707dac0b89f74b0e35c'))
 paddle.fluid.optimizer.ModelAverage.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
 paddle.fluid.optimizer.ModelAverage.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
 paddle.fluid.optimizer.ModelAverage.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f'))
 paddle.fluid.optimizer.ModelAverage.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.ModelAverage.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'grad_clip'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'b15cffad0903fc81af77a0580ceb2a9b'))
-paddle.fluid.optimizer.ModelAverage.restore (ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None), ('document', '18db9c70be9c4dd466f9844457b21bfe'))
+paddle.fluid.optimizer.ModelAverage.restore (ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None), ('document', '5f14ea4adda2791e1c3b37ff327f6a83'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871'))
 paddle.fluid.optimizer.LarsMomentumOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae'))
diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py
index 847ca1872..ab0c62df2 100644
--- a/python/paddle/dataset/mnist.py
+++ b/python/paddle/dataset/mnist.py
@@ -90,7 +90,7 @@ def train():
     MNIST training set creator.
 
     It returns a reader creator, each sample in the reader is image pixels in
-    [0, 1] and label in [0, 9].
+    [-1, 1] and label in [0, 9].
 
     :return: Training reader creator
     :rtype: callable
@@ -107,7 +107,7 @@ def test():
     MNIST test set creator.
 
     It returns a reader creator, each sample in the reader is image pixels in
-    [0, 1] and label in [0, 9].
+    [-1, 1] and label in [0, 9].
 
     :return: Test reader creator.
     :rtype: callable
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index f8c6683e3..c3d136c29 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -2145,22 +2145,45 @@ class ModelAverage(Optimizer):
         regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer.
         name: A optional name prefix.
+
     Examples:
 
       .. code-block:: python
 
-        optimizer = fluid.optimizer.Momentum()
-        optimizer.minimize(cost)
-        model_average = fluid.optimizer.ModelAverage(0.15,
-                                                min_average_window=10000,
-                                                max_average_window=20000)
-        for pass_id in range(args.pass_num):
-            for data in train_reader():
-                exe.run(fluid.default_main_program()...)
+        import paddle.fluid as fluid
+        import numpy
+
+        # First create the Executor.
+        place = fluid.CPUPlace() # fluid.CUDAPlace(0)
+        exe = fluid.Executor(place)
+        train_program = fluid.Program()
+        startup_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # build net
+            data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+            hidden = fluid.layers.fc(input=data, size=10)
+            loss = fluid.layers.mean(hidden)
+            optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
+            optimizer.minimize(loss)
+
+            # build ModelAverage optimizer
+            model_average = fluid.optimizer.ModelAverage(0.15,
+                                            min_average_window=10000,
+                                            max_average_window=20000)
+
+            exe.run(startup_program)
+            x = numpy.random.random(size=(10, 1)).astype('float32')
+            outs = exe.run(program=train_program,
+                           feed={'X': x},
+                           fetch_list=[loss.name])
+
+            # apply ModelAverage
             with model_average.apply(exe):
-                for data in test_reader():
-                    exe.run(inference_program...)
+                x = numpy.random.random(size=(10, 1)).astype('float32')
+                exe.run(program=train_program,
+                        feed={'X': x},
+                        fetch_list=[loss.name])
     """
 
     def __init__(self,
@@ -2275,6 +2298,10 @@ class ModelAverage(Optimizer):
     @signature_safe_contextmanager
     def apply(self, executor, need_restore=True):
         """Apply average values to parameters of current model.
+
+        Args:
+            executor(fluid.Executor): current executor.
+            need_restore(bool): If you finally need to do restore, set it to True. Default is True.
         """
         executor.run(self.apply_program)
         try:
@@ -2285,6 +2312,9 @@ class ModelAverage(Optimizer):
 
     def restore(self, executor):
         """Restore parameter values of current model.
+
+        Args:
+            executor(fluid.Executor): current executor.
         """
         executor.run(self.restore_program)
-- 
GitLab
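
The apply()/restore() pair documented by this patch can also be driven without the automatic restore that the context manager performs on exit. The sketch below is illustrative only and is not part of the patch: it reuses the toy network and feed name ('X') from the docstring example above, assumes the same fluid 1.x API, and shows the need_restore=False plus explicit restore() pattern implied by the new Args sections.

    import numpy
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    train_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(train_program, startup_program):
        # Same toy network as the docstring example in the patch.
        data = fluid.layers.data(name='X', shape=[1], dtype='float32')
        hidden = fluid.layers.fc(input=data, size=10)
        loss = fluid.layers.mean(hidden)
        optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)
        optimizer.minimize(loss)
        model_average = fluid.optimizer.ModelAverage(0.15,
                                                     min_average_window=10000,
                                                     max_average_window=20000)

    exe.run(startup_program)
    x = numpy.random.random(size=(10, 1)).astype('float32')
    exe.run(program=train_program, feed={'X': x}, fetch_list=[loss.name])

    # need_restore=False: the context manager does not restore on exit,
    # so the averaged parameter values stay active after the with block.
    with model_average.apply(exe, need_restore=False):
        exe.run(program=train_program, feed={'X': x}, fetch_list=[loss.name])

    # Explicitly put the original (non-averaged) parameter values back.
    model_average.restore(exe)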