diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
index d16e7a95a6825cbbc97bc859bfc3d1e3a2fd592a..530964a7314582ce0171d570fa99c4db70909c62 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
@@ -22,12 +22,15 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 import time
+import os
 
 from paddle.fluid import ParamAttr
 from paddle.fluid.contrib.layers import basic_lstm
 from paddle.fluid.executor import Executor
 from paddle.fluid.layers.control_flow import StaticRNN as PaddingRNN
 
+os.environ["CPU_NUM"] = "1"
+
 
 class RnnConfig(object):
     def __init__(self, model_type, rnn_model):
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py
index 8cd7371159a8353b70b0337b4d1f0f2d5422c9dd..556f64bd48386fa178172b6187da2ced18ce4be9 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py
@@ -14,6 +14,7 @@
 
 from __future__ import print_function
 
+import os
 import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.compiler as compiler
@@ -21,10 +22,13 @@ import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 import unittest
 
+from paddle.fluid import ParamAttr
 from paddle.fluid.framework import Program, grad_var_name
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
 
+np.random.seed(123)
+os.environ["CPU_NUM"] = "1"
 fluid.core._set_eager_deletion_mode(0.0, 1.0, True)
 
 
@@ -71,8 +75,8 @@ class PySimpleRNN2(PyRNNBase):
         super(PySimpleRNN2, self).__init__(input_shape, output_shape)
 
         seq_len, batch_size, input_dim = input_shape
-        self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
-        self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.W = np.ones(shape=(input_dim, input_dim)).astype("float32")
+        self.U = np.zeros(shape=(input_dim, input_dim)).astype("float32")
         self.h_boot = np.ones(shape=(batch_size, input_dim)).astype("float32")
 
         men_dim = (seq_len, batch_size, input_dim)
@@ -186,7 +190,7 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase):
             fetch_list=fetch_list,
             return_numpy=False)
 
-    def test_backward(self, rtol=0.1):
+    def test_backward(self, rtol=0.01):
         self.check_forward()
 
         with fluid.program_guard(self.main_program, self.startup_program):
@@ -208,7 +212,7 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase):
         pd_output = self.forward()
         py_output = self.py_rnn.forward()
         self.assertEqual(pd_output.shape, py_output.shape)
-        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all())
 
     def get_numerical_gradient(self, delta=0.005):
         dloss_dout = 1.0
@@ -278,14 +282,20 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1):
             h_pre = rnn.memory(init=h_boot)
             x_t = rnn.step_input(x)
 
-            temp_l = layers.fc(input=x_t,
-                               size=self.input_dim,
-                               param_attr='W',
-                               bias_attr=False)
-            temp_r = layers.fc(input=h_pre,
-                               size=self.input_dim,
-                               param_attr='U',
-                               bias_attr=False)
+            temp_l = layers.fc(
+                input=x_t,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name='W',
+                    initializer=fluid.initializer.ConstantInitializer(1.0)),
+                bias_attr=False)
+            temp_r = layers.fc(
+                input=h_pre,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name='U',
+                    initializer=fluid.initializer.ConstantInitializer(0.0)),
+                bias_attr=False)
 
             h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r))
 
@@ -295,7 +305,7 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1):
         return rnn()
 
     def test_backward(self):
-        super(EagerDeletionRecurrentOpTest2, self).test_backward(rtol=0.2)
+        super(EagerDeletionRecurrentOpTest2, self).test_backward(rtol=0.01)
 
 
 class EagerDeletionRecurrentOpMultipleMemoryTest(EagerDeletionRecurrentOpTest1):
@@ -496,7 +506,6 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1):
 
         # Second RNN
         pre_mem = np.zeros_like(x) if step_id == 0 else self.mem_1[step_id - 1]
-        # print(np.sum(self.rnn_0_output))
         self.mem_1[step_id] = x + np.sum(self.rnn_0_output)
         self.y[step_id] = self.mem_1[step_id] + pre_mem
 
@@ -675,8 +684,8 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest(
         self.assertEqual(pd_output.shape, py_output.shape)
         self.assertTrue(
             np.isclose(
-                forward_only_output, py_output, rtol=0.1).all)
-        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+                forward_only_output, py_output, rtol=0.01).all())
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all())
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
index e2a65a99837e77f26576a75e7280770dc46db6eb..02a8f38514d6ed68be0d9a2d959c44158f061150 100644
--- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py
+++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py
@@ -17,11 +17,13 @@ from __future__ import print_function
 import unittest
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
+import numpy as np
+import paddle.fluid.core as core
+
+from paddle.fluid import ParamAttr
 from paddle.fluid.framework import Program, grad_var_name
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
-import numpy as np
-import paddle.fluid.core as core
 
 np.random.seed(123)
 
@@ -69,8 +71,8 @@ class PySimpleRNN2(PyRNNBase):
         super(PySimpleRNN2, self).__init__(input_shape, output_shape)
 
         seq_len, batch_size, input_dim = input_shape
-        self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
-        self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.W = np.ones(shape=(input_dim, input_dim)).astype("float32")
+        self.U = np.zeros(shape=(input_dim, input_dim)).astype("float32")
         self.h_boot = np.ones(shape=(batch_size, input_dim)).astype("float32")
 
         men_dim = (seq_len, batch_size, input_dim)
@@ -184,7 +186,7 @@ class RecurrentOpTest1(unittest.TestCase):
             fetch_list=fetch_list,
             return_numpy=False)
 
-    def test_backward(self, rtol=0.1):
+    def test_backward(self, rtol=0.01):
         self.check_forward()
 
         with fluid.program_guard(self.main_program, self.startup_program):
@@ -206,7 +208,7 @@ class RecurrentOpTest1(unittest.TestCase):
         pd_output = self.forward()
         py_output = self.py_rnn.forward()
         self.assertEqual(pd_output.shape, py_output.shape)
-        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all())
+        self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all())
 
     def get_numerical_gradient(self, delta=0.005):
         dloss_dout = 1.0
@@ -276,14 +278,20 @@ class RecurrentOpTest2(RecurrentOpTest1):
             h_pre = rnn.memory(init=h_boot)
             x_t = rnn.step_input(x)
 
-            temp_l = layers.fc(input=x_t,
-                               size=self.input_dim,
-                               param_attr='W',
-                               bias_attr=False)
-            temp_r = layers.fc(input=h_pre,
-                               size=self.input_dim,
-                               param_attr='U',
-                               bias_attr=False)
+            temp_l = layers.fc(
+                input=x_t,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name='W',
+                    initializer=fluid.initializer.ConstantInitializer(1.0)),
+                bias_attr=False)
+            temp_r = layers.fc(
+                input=h_pre,
+                size=self.input_dim,
+                param_attr=ParamAttr(
+                    name='U',
+                    initializer=fluid.initializer.ConstantInitializer(0.0)),
+                bias_attr=False)
 
             h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r))
 
@@ -293,7 +301,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
         return rnn()
 
     def test_backward(self):
-        super(RecurrentOpTest2, self).test_backward(rtol=0.2)
+        super(RecurrentOpTest2, self).test_backward(rtol=0.01)
 
 
 class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
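The common thread across all three files: the tests previously initialized the recurrent weights with `np.random.normal`, so outputs varied from run to run and the assertions needed loose tolerances (`rtol=0.1` and `0.2`). The patch pins `W` to all ones and `U` to all zeros (on the Paddle side via `ParamAttr` with `ConstantInitializer`, on the NumPy reference side via `np.ones`/`np.zeros`), seeds NumPy, and fixes `CPU_NUM=1`, which makes both sides deterministic and lets the tolerance tighten to `rtol=0.01`. As a minimal NumPy-only sketch of the recurrence these tests compare (shapes are illustrative, not taken from the tests):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Deterministic parameters, mirroring ParamAttr(name='W', ConstantInitializer(1.0))
# and ParamAttr(name='U', ConstantInitializer(0.0)) in the updated tests.
seq_len, batch_size, input_dim = 4, 1, 3  # illustrative shapes
W = np.ones((input_dim, input_dim), dtype="float32")
U = np.zeros((input_dim, input_dim), dtype="float32")

h = np.ones((batch_size, input_dim), dtype="float32")  # plays the role of h_boot
x = np.ones((seq_len, batch_size, input_dim), dtype="float32")

for t in range(seq_len):
    # One step: h = sigmoid(x_t . W + h_pre . U), matching the fc + fc +
    # elementwise_add + sigmoid graph built inside rnn.step() in the tests.
    h = sigmoid(np.matmul(x[t], W) + np.matmul(h, U))

print(h)  # identical on every run: no np.random.normal in the parameter path
```

With constant parameters, the only remaining run-to-run variance is floating-point accumulation order, which setting `CPU_NUM=1` before the executor is created presumably reduces further by keeping execution on a single device.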