From 7c370e42f9923068a91625fe3c555a458cfb8b4f Mon Sep 17 00:00:00 2001
From: chengduo
Date: Mon, 22 Apr 2019 20:39:46 +0800
Subject: [PATCH] Fix test_recurrent_op (#17001)

* fix ramdom fail
test=develop
---
 .../tests/unittests/test_recurrent_op.py | 10 +++++++--
 .../tests/unittests/test_weight_decay.py | 21 ++++++++++---------
 2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
index cf86ebf0a8..6c35557906 100644
--- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py
+++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
@@ -182,7 +182,7 @@ class RecurrentOpTest1(unittest.TestCase):
             fetch_list=fetch_list,
             return_numpy=False)
 
-    def test_backward(self):
+    def test_backward(self, rtol=0.1):
         self.check_forward()
 
         with fluid.program_guard(self.main_program, self.startup_program):
@@ -195,7 +195,10 @@ class RecurrentOpTest1(unittest.TestCase):
             self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape)
             self.assertTrue(
                 np.isclose(
-                    num_grad[idx], ana_grad[idx], rtol=0.1).all())
+                    num_grad[idx], ana_grad[idx], rtol=rtol).all(),
+                "num_grad (" + name + ") has diff at " + str(self.place) +
+                "\nExpect " + str(num_grad[idx]) + "\n" + "But Got" +
+                str(ana_grad[idx]) + " in class " + self.__class__.__name__)
 
     def check_forward(self):
         pd_output = self.forward()
@@ -287,6 +290,9 @@ class RecurrentOpTest2(RecurrentOpTest1):
 
         return rnn()
 
+    def test_backward(self):
+        super(RecurrentOpTest2, self).test_backward(rtol=0.2)
+
 
 class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
     '''
diff --git a/python/paddle/fluid/tests/unittests/test_weight_decay.py b/python/paddle/fluid/tests/unittests/test_weight_decay.py
index e5e7e76737..38db4ef193 100644
--- a/python/paddle/fluid/tests/unittests/test_weight_decay.py
+++ b/python/paddle/fluid/tests/unittests/test_weight_decay.py
@@ -95,7 +95,6 @@ class TestWeightDecay(unittest.TestCase):
                          place,
                          feed_list,
                          loss,
-                         use_cuda=True,
                          use_reduce=False,
                          use_fast_executor=False,
                          use_ir_memory_optimize=False):
@@ -136,11 +135,9 @@ class TestWeightDecay(unittest.TestCase):
         startup_prog = fluid.framework.Program()
         startup_prog.random_seed = 1
         with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
-
             data = fluid.layers.data(
                 name="words", shape=[1], dtype="int64", lod_level=1)
             label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-
             avg_cost = model(data, label, len(self.word_dict))
 
             param_list = [(var, var * self.learning_rate)
@@ -148,7 +145,6 @@ class TestWeightDecay(unittest.TestCase):
 
             optimizer = fluid.optimizer.Adagrad(
                 learning_rate=self.learning_rate)
-
             optimizer.minimize(avg_cost)
 
             for params in param_list:
@@ -158,10 +154,7 @@ class TestWeightDecay(unittest.TestCase):
 
             if use_parallel_exe:
                 loss = self.run_parallel_exe(
-                    place, [data, label],
-                    loss=avg_cost,
-                    use_cuda=True,
-                    use_reduce=use_reduce)
+                    place, [data, label], loss=avg_cost, use_reduce=use_reduce)
             else:
                 loss = self.run_executor(place, [data, label], loss=avg_cost)
 
@@ -176,13 +169,21 @@ class TestWeightDecay(unittest.TestCase):
             place, model, use_parallel_exe=True, use_reduce=False)
 
         for i in range(len(loss)):
-            assert np.isclose(a=loss[i], b=loss2[i], rtol=5e-5)
+            self.assertTrue(
+                np.isclose(
+                    a=loss[i], b=loss2[i], rtol=5e-5),
+                "Expect " + str(loss[i]) + "\n" + "But Got" + str(loss2[i]) +
+                " in class " + self.__class__.__name__)
 
         loss3 = self.check_weight_decay(
             place, model, use_parallel_exe=True, use_reduce=True)
 
         for i in range(len(loss)):
-            assert np.isclose(a=loss[i], b=loss3[i], rtol=5e-5)
+            self.assertTrue(
+                np.isclose(
+                    a=loss[i], b=loss3[i], rtol=5e-5),
+                "Expect " + str(loss[i]) + "\n" + "But Got" + str(loss2[i]) +
+                " in class " + self.__class__.__name__)
 
 
 if __name__ == '__main__':
-- 
GitLab
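
Note: the pattern this patch introduces can be reproduced outside Paddle. Below is a minimal, self-contained sketch using only numpy and unittest; the numerical_grad helper and the quadratic test function are hypothetical stand-ins for the recurrent-op gradients in the real test, not Paddle code. It shows how a configurable rtol is threaded into np.isclose and how the assertion message reports both gradients on failure, which is presumably why the patch lets RecurrentOpTest2 loosen rtol to 0.2 to avoid random failures.

    # Sketch of a tolerance-based gradient check with a descriptive failure message.
    import unittest
    import numpy as np

    def numerical_grad(f, x, eps=1e-5):
        # Central-difference approximation of df/dx, element by element.
        grad = np.zeros_like(x)
        for i in range(x.size):
            step = np.zeros_like(x)
            step.flat[i] = eps
            grad.flat[i] = (f(x + step) - f(x - step)) / (2.0 * eps)
        return grad

    class GradCheckSketch(unittest.TestCase):
        def test_backward(self, rtol=0.1):
            x = np.array([0.5, -1.0, 2.0])
            f = lambda v: np.sum(v ** 2)  # analytical gradient is 2 * v
            num_grad = numerical_grad(f, x)
            ana_grad = 2.0 * x
            self.assertEqual(num_grad.shape, ana_grad.shape)
            self.assertTrue(
                np.isclose(num_grad, ana_grad, rtol=rtol).all(),
                "num_grad has diff\nExpect " + str(num_grad) +
                "\nBut Got " + str(ana_grad) +
                " in class " + self.__class__.__name__)

    if __name__ == '__main__':
        unittest.main()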