From 93107ce138681b78c689c4e28440d4c50ff237d8 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 14 Mar 2018 10:25:08 +0800 Subject: [PATCH] add regularization for test_machine_translation --- python/paddle/fluid/regularizer.py | 1 - python/paddle/fluid/tests/book/test_machine_translation.py | 5 ++++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index dc641cdd1..029db7d2d 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -176,7 +176,6 @@ class L1DecayRegularizer(WeightDecayRegularizer): dtype="float32", shape=param.shape, lod_level=param.lod_level) if grad.type == core.VarDesc.VarType.SELECTED_ROWS: - # add concat_rows decay = block.create_var( dtype="float32", shape=param.shape, diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index caa9596a1..fa38bd376 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -181,7 +181,10 @@ def train_main(use_cuda, is_sparse, is_local=True): cost = pd.cross_entropy(input=rnn_out, label=label) avg_cost = pd.mean(cost) - optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.1)) optimize_ops, params_grads = optimizer.minimize(avg_cost) train_data = paddle.batch( -- GitLab