Unverified commit 6261076c, authored by W wanghuancoder, committed by GitHub

Del old dygraph optest5 (#51686)

* delete old dygraph op test
Parent 3d78e759
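The change this PR applies is mechanical across every file below: tests import the harness from `eager_op_test` instead of `op_test`, and the now-redundant `check_eager=True` / `eager_mode=True` arguments are dropped, because the eager (dygraph) harness exercises eager mode by default. A minimal before/after sketch of an affected test, assuming PaddlePaddle's test tree is on the path; `MyOpTest` and `"my_op"` are hypothetical names for illustration, not code from this PR:

import numpy as np
from eager_op_test import OpTest  # was: from op_test import OpTest


class MyOpTest(OpTest):  # hypothetical illustration of the migration
    def setUp(self):
        self.op_type = "my_op"  # hypothetical operator name
        x = np.random.random((3, 4)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': x}  # identity op keeps the sketch trivial

    def test_check_output(self):
        # was: self.check_output(check_eager=True)
        self.check_output()

    def test_check_grad(self):
        # was: self.check_grad(['X'], 'Out', check_eager=True)
        self.check_grad(['X'], 'Out')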
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveAllToAllSingle(TestMultipleGpus):
     def test_collective_alltoall_single(self):
-        self.run_mnist_2gpu('collective_alltoall_single.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_alltoall_single.py')
 if __name__ == "__main__":
......
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveBatchIsendIrecv(TestMultipleGpus):
     def test_collective_batch_isend_irecv(self):
-        self.run_mnist_2gpu('collective_batch_isend_irecv.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_batch_isend_irecv.py')
 if __name__ == "__main__":
......
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveReduceScatter(TestMultipleGpus):
     def test_collective_reduce_scatter(self):
-        self.run_mnist_2gpu('collective_reduce_scatter.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_reduce_scatter.py')
 if __name__ == "__main__":
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
@@ -45,10 +45,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
     def config(self):
         """
@@ -194,10 +194,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5)
     def config(self):
         """
@@ -223,10 +223,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
     def config(self):
         """
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.fluid.framework import Program, program_guard
@@ -36,7 +36,7 @@ class TestGatherTreeOp(OpTest):
         self.outputs = {'Out': self.backtrace(ids, parents)}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     @staticmethod
     def backtrace(ids, parents):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -49,12 +49,10 @@ class TestGraphSendRecvMaxOp(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
-        )
+        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
 class TestGraphSendRecvMinOp(OpTest):
@@ -79,12 +77,10 @@ class TestGraphSendRecvMinOp(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
-        )
+        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
 class TestGraphSendRecvSumOp(OpTest):
@@ -107,10 +103,10 @@ class TestGraphSendRecvSumOp(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 class TestGraphSendRecvMeanOp(OpTest):
@@ -135,10 +131,10 @@ class TestGraphSendRecvMeanOp(OpTest):
         self.outputs = {'Out': out, 'Dst_count': dst_count}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 def compute_graph_send_recv_for_sum_mean(inputs, attributes):
......
@@ -16,7 +16,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -314,10 +314,10 @@ class TestGraphSendUERecvSumOp(OpTest):
         self.message_op = 'ADD'
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 class TestSumCase1(TestGraphSendUERecvSumOp):
@@ -420,10 +420,10 @@ class TestGraphSendUERecvMeanOp(OpTest):
         self.message_op = 'ADD'
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 class TestMeanCase1(TestGraphSendUERecvMeanOp):
@@ -526,14 +526,13 @@ class TestGraphSendUERecvMaxOp(OpTest):
         self.message_op = 'ADD'
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
             user_defined_grads=self.gradients,
-            check_eager=True,
         )
@@ -637,14 +636,13 @@ class TestGraphSendUERecvMinOp(OpTest):
         self.message_op = 'ADD'
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
             user_defined_grads=self.gradients,
-            check_eager=True,
         )
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -63,10 +63,10 @@ class TestGraphSendUVOp(OpTest):
         self.outputs = {'out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['x', 'y'], 'out', check_eager=True)
+        self.check_grad(['x', 'y'], 'out')
     def set_config(self):
         self.x = np.random.random((10, 20)).astype("float64")
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
@@ -379,7 +379,7 @@ class TestGridSamplerOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad_normal(self):
         self.check_grad(
@@ -387,7 +387,6 @@ class TestGridSamplerOp(OpTest):
             'Output',
             max_relative_error=0.01,
             numeric_grad_delta=self.numeric_grad_delta,
-            check_eager=True,
         )
     def initTestCase(self):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 class TestHingeLossOp(OpTest):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -154,7 +154,7 @@ class TestHistogramOp(OpTest):
         self.attrs = {"bins": self.bins, "min": self.min, "max": self.max}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestHistogramOp_ZeroDim(TestHistogramOp):
......
@@ -16,7 +16,7 @@ import math
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
@@ -219,14 +219,13 @@ class TestHSigmoidOp(OpTest):
         self.user_grads = hsigmoid_grad(x, w, label, bias, num_classes)
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.check_grad(
             ['X', 'W', 'Bias'],
             ['Out'],
             user_defined_grads=self.user_grads,
-            check_eager=True,
         )
@@ -280,7 +279,7 @@ class TestHSigmoidOpSparse(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
@@ -416,14 +415,13 @@ class TestHSigmoidOpWithCostumTree(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.check_grad(
             ['Bias', 'X', 'W'],
             ['Out'],
             no_grad_set=set('Label'),
-            check_eager=True,
         )
@@ -482,12 +480,10 @@ class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X', 'W'], ['Out'], no_grad_set=set('Label'), check_eager=True
-        )
+        self.check_grad(['X', 'W'], ['Out'], no_grad_set=set('Label'))
 class TestHSigmoidLossAPI(unittest.TestCase):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -48,12 +48,12 @@ class TestIdentityLossOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()
     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
         paddle.disable_static()
     def initTestCase(self):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.fluid import Program
@@ -93,10 +93,10 @@ class TestIndexAddOp(OpTest):
         self.add_value_shape = (3, 3)
     def test_check_output(self):
-        self.check_output(check_eager=True, atol=1e-2)
+        self.check_output(atol=1e-2)
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'AddValue'], 'Out', check_eager=True)
+        self.check_grad(['X', 'AddValue'], 'Out')
 class TestIndexAddAPI(unittest.TestCase):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -40,10 +40,10 @@ class TestIndexSampleOp(OpTest):
         self.outputs = {'Out': out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
     def config(self):
         """
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -40,10 +40,10 @@ class TestInverseOp(OpTest):
         self.outputs = {'Output': inverse}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_grad(self):
-        self.check_grad(['Input'], 'Output', check_eager=True)
+        self.check_grad(['Input'], 'Output')
 class TestInverseOpBatched(TestInverseOp):
@@ -60,9 +60,7 @@ class TestInverseOpLarge(TestInverseOp):
         self.python_api = paddle.tensor.math.inverse
     def test_grad(self):
-        self.check_grad(
-            ['Input'], 'Output', max_relative_error=1e-6, check_eager=True
-        )
+        self.check_grad(['Input'], 'Output', max_relative_error=1e-6)
 class TestInverseOpFP32(TestInverseOp):
@@ -72,9 +70,7 @@ class TestInverseOpFP32(TestInverseOp):
         self.python_api = paddle.tensor.math.inverse
     def test_grad(self):
-        self.check_grad(
-            ['Input'], 'Output', max_relative_error=1e-2, check_eager=True
-        )
+        self.check_grad(['Input'], 'Output', max_relative_error=1e-2)
 class TestInverseOpBatchedFP32(TestInverseOpFP32):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy.random as random
-from op_test import OpTest
+from eager_op_test import OpTest
 class TestIOUSimilarityOp(OpTest):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -56,7 +56,7 @@ class TestIscloseOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestIscloseOpException(TestIscloseOp):
@@ -64,28 +64,28 @@ class TestIscloseOpException(TestIscloseOp):
         def test_rtol_num():
             self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
         self.assertRaises(ValueError, test_rtol_num)
         def test_rtol_type():
            self.inputs['Rtol'] = np.array([5]).astype("int32")
            self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
         self.assertRaises(ValueError, test_rtol_type)
         def test_atol_num():
            self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
            self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
         self.assertRaises(ValueError, test_atol_num)
         def test_atol_type():
            self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
            self.inputs['Atol'] = np.array([8]).astype("int32")
-            self.check_output(check_eager=True)
+            self.check_output()
         self.assertRaises(ValueError, test_atol_type)
@@ -239,7 +239,7 @@ class TestIscloseOpFloat16(TestIscloseOp):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(place, check_eager=True)
+                self.check_output_with_place(place)
 class TestIscloseOpFloat32(TestIscloseOp):
@@ -260,7 +260,7 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.equal_nan = False
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestIscloseOpLargeDimInput(TestIscloseOp):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle.fluid.core as core
......
@@ -14,7 +14,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle.nn.functional import kl_div
@@ -55,12 +55,10 @@ class TestKLDivLossOp(OpTest):
         self.outputs = {'Loss': loss.astype('float64')}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True
-        )
+        self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
     def initTestCase(self):
         self.x_shape = (4, 5, 5)
@@ -114,12 +112,13 @@ class TestKLDivLossDygraph(unittest.TestCase):
         self.run_kl_loss('none')
     def test_kl_loss_static_api(self):
-        input = paddle.static.data(name='input', shape=[5, 20])
-        label = paddle.static.data(name='label', shape=[5, 20])
-        paddle.nn.functional.kl_div(input, label)
-        paddle.nn.functional.kl_div(input, label, 'sum')
-        paddle.nn.functional.kl_div(input, label, 'batchmean')
+        with paddle_static_guard():
+            input = paddle.static.data(name='input', shape=[5, 20])
+            label = paddle.static.data(name='label', shape=[5, 20])
+            paddle.nn.functional.kl_div(input, label)
+            paddle.nn.functional.kl_div(input, label, 'sum')
+            paddle.nn.functional.kl_div(input, label, 'batchmean')
 class TestKLDivLossTypePromotion(unittest.TestCase):
......
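Alongside the flag removal, static-graph API tests in this PR (kl_div above; linspace, local_response_norm, and margin_cross_entropy below) are wrapped in `paddle_static_guard()`, a context manager imported from `eager_op_test`. It replaces the bare `paddle.enable_static()` / `paddle.disable_static()` pairs the old tests used, so a failing test can no longer leave the process stuck in static mode. A rough sketch of the semantics this diff relies on, as an assumption about the helper's behavior rather than the actual `eager_op_test` implementation:

import contextlib

import paddle


@contextlib.contextmanager
def paddle_static_guard():
    # Assumed behavior: run the `with` body in static-graph mode and
    # restore dynamic (eager) mode afterwards, even if the body raises.
    try:
        paddle.enable_static()
        yield
    finally:
        paddle.disable_static()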
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -37,16 +37,16 @@ class TestKronOp(OpTest):
         return "float64"
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
     def test_check_grad_ignore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_eager=True)
+        self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
     def test_check_grad_ignore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_eager=True)
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 class TestKronOp2(TestKronOp):
@@ -168,7 +168,7 @@ class TestComplexKronOp(OpTest):
         return grad_y
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad_normal(self):
         self.check_grad(
@@ -176,7 +176,6 @@ class TestComplexKronOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
     def test_check_grad_ingore_x(self):
@@ -186,7 +185,6 @@ class TestComplexKronOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
     def test_check_grad_ingore_y(self):
@@ -196,7 +194,6 @@ class TestComplexKronOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -54,11 +54,11 @@ class TestKthvalueOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out', check_eager=True)
+        self.check_grad(set(['X']), 'Out')
 class TestKthvalueOpWithKeepdim(OpTest):
@@ -81,11 +81,11 @@ class TestKthvalueOpWithKeepdim(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out', check_eager=True)
+        self.check_grad(set(['X']), 'Out')
 class TestKthvalueOpKernels(unittest.TestCase):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 class TestL1NormOp(OpTest):
......
@@ -17,7 +17,7 @@ from functools import reduce
 from operator import mul
 import numpy as np
-from op_test import _set_use_system_allocator
+from eager_op_test import _set_use_system_allocator
 import paddle
 import paddle.fluid as fluid
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -43,10 +43,10 @@ class TestLerp(OpTest):
         self.shape = [100]
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 class TestLerpWithDim2(TestLerp):
......
@@ -16,7 +16,7 @@ import math
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from scipy import special
 import paddle
@@ -42,10 +42,10 @@ class TestLgammaOp(OpTest):
         self.dtype = np.float64
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_eager=True)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
 class TestLgammaOpFp32(TestLgammaOp):
@@ -53,9 +53,7 @@ class TestLgammaOpFp32(TestLgammaOp):
         self.dtype = np.float32
     def test_check_grad_normal(self):
-        self.check_grad(
-            ['X'], 'Out', numeric_grad_delta=0.005, check_eager=True
-        )
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
 class TestLgammaOpApi(unittest.TestCase):
......
@@ -16,7 +16,7 @@ import random
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 class LinearChainCrfForward:
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 import paddle.fluid as fluid
@@ -37,7 +37,7 @@ class TestLinspaceOpCommonCase(OpTest):
         self.outputs = {'Out': np.arange(0, 11).astype(dtype)}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestLinspaceOpReverseCase(OpTest):
@@ -55,7 +55,7 @@ class TestLinspaceOpReverseCase(OpTest):
         self.outputs = {'Out': np.arange(10, -1, -1).astype(dtype)}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestLinspaceOpNumOneCase(OpTest):
@@ -73,56 +73,55 @@ class TestLinspaceOpNumOneCase(OpTest):
         self.outputs = {'Out': np.array(10, dtype=dtype)}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestLinspaceAPI(unittest.TestCase):
     def test_variable_input1(self):
-        start = paddle.full(shape=[1], fill_value=0, dtype='float32')
-        stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
-        num = paddle.full(shape=[1], fill_value=5, dtype='int32')
-        out = paddle.linspace(start, stop, num, dtype='float32')
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res = exe.run(fluid.default_main_program(), fetch_list=[out])
-        np_res = np.linspace(0, 10, 5, dtype='float32')
-        self.assertEqual((res == np_res).all(), True)
+        with paddle_static_guard():
+            start = paddle.full(shape=[1], fill_value=0, dtype='float32')
+            stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
+            num = paddle.full(shape=[1], fill_value=5, dtype='int32')
+            out = paddle.linspace(start, stop, num, dtype='float32')
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res = exe.run(fluid.default_main_program(), fetch_list=[out])
+            np_res = np.linspace(0, 10, 5, dtype='float32')
+            self.assertEqual((res == np_res).all(), True)
     def test_variable_input2(self):
-        paddle.disable_static()
         start = paddle.full(shape=[1], fill_value=0, dtype='float32')
         stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
         num = paddle.full(shape=[1], fill_value=5, dtype='int32')
         out = paddle.linspace(start, stop, num, dtype='float32')
         np_res = np.linspace(0, 10, 5, dtype='float32')
         self.assertEqual((out.numpy() == np_res).all(), True)
-        paddle.enable_static()
     def test_dtype(self):
-        out_1 = paddle.linspace(0, 10, 5, dtype='float32')
-        out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
-        out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
-            fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
-        )
-        assert np.array_equal(res_1, res_2)
+        with paddle_static_guard():
+            out_1 = paddle.linspace(0, 10, 5, dtype='float32')
+            out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
+            out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res_1, res_2, res_3 = exe.run(
+                fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
+            )
+            assert np.array_equal(res_1, res_2)
     def test_name(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            out = paddle.linspace(
-                0, 10, 5, dtype='float32', name='linspace_res'
-            )
-            assert 'linspace_res' in out.name
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                out = paddle.linspace(
+                    0, 10, 5, dtype='float32', name='linspace_res'
+                )
+                assert 'linspace_res' in out.name
     def test_imperative(self):
-        paddle.disable_static()
         out1 = paddle.linspace(0, 10, 5, dtype='float32')
         np_out1 = np.linspace(0, 10, 5, dtype='float32')
         out2 = paddle.linspace(0, 10, 5, dtype='int32')
         np_out2 = np.linspace(0, 10, 5, dtype='int32')
         out3 = paddle.linspace(0, 10, 200, dtype='int32')
         np_out3 = np.linspace(0, 10, 200, dtype='int32')
-        paddle.enable_static()
         self.assertEqual((out1.numpy() == np_out1).all(), True)
         self.assertEqual((out2.numpy() == np_out2).all(), True)
         self.assertEqual((out3.numpy() == np_out3).all(), True)
@@ -130,52 +129,57 @@ class TestLinspaceAPI(unittest.TestCase):
 class TestLinspaceOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-            def test_dtype():
-                paddle.linspace(0, 10, 1, dtype="int8")
-            self.assertRaises(TypeError, test_dtype)
-            def test_dtype1():
-                paddle.linspace(0, 10, 1.33, dtype="int32")
-            self.assertRaises(TypeError, test_dtype1)
-            def test_start_type():
-                paddle.linspace([0], 10, 1, dtype="float32")
-            self.assertRaises(TypeError, test_start_type)
-            def test_end_type():
-                paddle.linspace(0, [10], 1, dtype="float32")
-            self.assertRaises(TypeError, test_end_type)
-            def test_step_dtype():
-                paddle.linspace(0, 10, [0], dtype="float32")
-            self.assertRaises(TypeError, test_step_dtype)
-            def test_start_dtype():
-                start = paddle.static.data(
-                    shape=[1], dtype="float64", name="start"
-                )
-                paddle.linspace(start, 10, 1, dtype="float32")
-            self.assertRaises(ValueError, test_start_dtype)
-            def test_end_dtype():
-                end = paddle.static.data(shape=[1], dtype="float64", name="end")
-                paddle.linspace(0, end, 1, dtype="float32")
-            self.assertRaises(ValueError, test_end_dtype)
-            def test_num_dtype():
-                num = paddle.static.data(shape=[1], dtype="int32", name="step")
-                paddle.linspace(0, 10, num, dtype="float32")
-            self.assertRaises(TypeError, test_step_dtype)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                def test_dtype():
+                    paddle.linspace(0, 10, 1, dtype="int8")
+                self.assertRaises(TypeError, test_dtype)
+                def test_dtype1():
+                    paddle.linspace(0, 10, 1.33, dtype="int32")
+                self.assertRaises(TypeError, test_dtype1)
+                def test_start_type():
+                    paddle.linspace([0], 10, 1, dtype="float32")
+                self.assertRaises(TypeError, test_start_type)
+                def test_end_type():
+                    paddle.linspace(0, [10], 1, dtype="float32")
+                self.assertRaises(TypeError, test_end_type)
+                def test_step_dtype():
+                    paddle.linspace(0, 10, [0], dtype="float32")
+                self.assertRaises(TypeError, test_step_dtype)
+                def test_start_dtype():
+                    start = paddle.static.data(
+                        shape=[1], dtype="float64", name="start"
+                    )
+                    paddle.linspace(start, 10, 1, dtype="float32")
+                self.assertRaises(ValueError, test_start_dtype)
+                def test_end_dtype():
+                    end = paddle.static.data(
+                        shape=[1], dtype="float64", name="end"
+                    )
+                    paddle.linspace(0, end, 1, dtype="float32")
+                self.assertRaises(ValueError, test_end_dtype)
+                def test_num_dtype():
+                    num = paddle.static.data(
+                        shape=[1], dtype="int32", name="step"
+                    )
+                    paddle.linspace(0, 10, num, dtype="float32")
+                self.assertRaises(TypeError, test_step_dtype)
 if __name__ == "__main__":
......
@@ -16,7 +16,7 @@ import copy
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_multiclass_nms_op import iou
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 class TestLodResetOpByAttr(OpTest):
......
@@ -63,12 +63,10 @@ class TestLogSoftmaxOp(OpTest):
         pass
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
@@ -85,10 +83,10 @@ class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
         self.attrs = {'axis': -1}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], check_eager=True)
+        self.check_grad(['X'], ['Out'])
 class TestLogSoftmaxShape(TestLogSoftmaxOp):
@@ -122,7 +120,7 @@ class TestLogSoftmaxBF16Op(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=True)
+        self.check_output_with_place(place)
     def test_check_grad(self):
         place = core.CUDAPlace(0)
@@ -131,7 +129,6 @@ class TestLogSoftmaxBF16Op(OpTest):
             ['X'],
             ['Out'],
             user_defined_grads=[self.x_grad],
-            check_eager=True,
         )
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.fluid import core
@@ -59,12 +59,10 @@ class TestLogitOp(OpTest):
         self.eps = 1e-8
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 class TestLogitOpFp32(TestLogitOp):
@@ -74,12 +72,10 @@ class TestLogitOpFp32(TestLogitOp):
         self.eps = 1e-8
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 class TestLogitOpFp16(TestLogitOp):
@@ -89,12 +85,10 @@ class TestLogitOpFp16(TestLogitOp):
         self.eps = 1e-8
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 @unittest.skipIf(
@@ -122,7 +116,7 @@ class TestLogitOpBf16(OpTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, check_eager=True)
+            self.check_output_with_place(place)
     def test_check_grad(self):
         if core.is_compiled_with_cuda():
@@ -132,7 +126,6 @@ class TestLogitOpBf16(OpTest):
                 ['X'],
                 ['Out'],
                 user_defined_grads=[self.x_grad],
-                check_eager=True,
             )
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -87,7 +87,7 @@ class TestLogsumexp(OpTest):
         pass
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.check_grad(
@@ -95,7 +95,6 @@ class TestLogsumexp(OpTest):
             ['Out'],
             user_defined_grads=self.user_defined_grads,
             user_defined_grad_outputs=self.user_defined_grad_outputs,
-            check_eager=True,
         )
     def calc_grad(self):
......
@@ -16,7 +16,7 @@ import struct
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 class TestLookupTableDequantOp(OpTest):
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 import paddle.fluid as fluid
@@ -116,94 +116,104 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
             self.places.append(fluid.CUDAPlace(0))
     def check_static_3d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            in_np1 = np.random.random([3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 1))
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 40, 40], dtype="float32"
-            )
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCL'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NLC'
-            )
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-            fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                in_np1 = np.random.random([3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 1))
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 40, 40], dtype="float32"
+                )
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCL'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NLC'
+                )
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+                fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
     def check_static_4d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 40, 40, 3], dtype="float32"
-            )
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCHW'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NHWC'
-            )
-            in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-            fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 40, 40, 3], dtype="float32"
+                )
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCHW'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NHWC'
+                )
+                in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+                fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
     def check_static_5d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
-            )
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCDHW'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NDHWC'
-            )
-            in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-            fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
+                )
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCDHW'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NDHWC'
+                )
+                in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+                fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
     def test_static(self):
-        for place in self.places:
-            self.check_static_3d_input(place=place)
-            self.check_static_4d_input(place=place)
-            self.check_static_5d_input(place=place)
+        with paddle_static_guard():
+            for place in self.places:
+                self.check_static_3d_input(place=place)
+                self.check_static_4d_input(place=place)
+                self.check_static_5d_input(place=place)
    def check_dygraph_3d_input(self, place):
        with fluid.dygraph.guard(place):
@@ -268,46 +278,51 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
 class TestLocalResponseNormFAPIError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-            def test_Variable():
-                # the input of lrn must be Variable.
-                x1 = fluid.create_lod_tensor(
-                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
-                )
-                paddle.nn.functional.local_response_norm(x1, size=5)
-            self.assertRaises(TypeError, test_Variable)
-            def test_datatype():
-                x = paddle.static.data(
-                    name='x', shape=[3, 4, 5, 6], dtype="int32"
-                )
-                paddle.nn.functional.local_response_norm(x, size=5)
-            self.assertRaises(TypeError, test_datatype)
-            def test_dataformat():
-                x = paddle.static.data(
-                    name='x', shape=[3, 4, 5, 6], dtype="float32"
-                )
-                paddle.nn.functional.local_response_norm(
-                    x, size=5, data_format="NCTHW"
-                )
-            self.assertRaises(ValueError, test_dataformat)
-            def test_dim():
-                x = paddle.static.data(name='x', shape=[3, 4], dtype="float32")
-                paddle.nn.functional.local_response_norm(x, size=5)
-            self.assertRaises(ValueError, test_dim)
-            def test_shape():
-                x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
-                paddle.nn.functional.local_response_norm(x, size=5)
-            self.assertRaises(ValueError, test_shape)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                def test_Variable():
+                    # the input of lrn must be Variable.
+                    x1 = fluid.create_lod_tensor(
+                        np.array([-1, 3, 5, 5]),
+                        [[1, 1, 1, 1]],
+                        fluid.CPUPlace(),
+                    )
+                    paddle.nn.functional.local_response_norm(x1, size=5)
+                self.assertRaises(TypeError, test_Variable)
+                def test_datatype():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4, 5, 6], dtype="int32"
+                    )
+                    paddle.nn.functional.local_response_norm(x, size=5)
+                self.assertRaises(TypeError, test_datatype)
+                def test_dataformat():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4, 5, 6], dtype="float32"
+                    )
+                    paddle.nn.functional.local_response_norm(
+                        x, size=5, data_format="NCTHW"
+                    )
+                self.assertRaises(ValueError, test_dataformat)
+                def test_dim():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4], dtype="float32"
+                    )
+                    paddle.nn.functional.local_response_norm(x, size=5)
+                self.assertRaises(ValueError, test_dim)
+                def test_shape():
+                    x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
+                    paddle.nn.functional.local_response_norm(x, size=5)
+                self.assertRaises(ValueError, test_shape)
 class TestLocalResponseNormCAPI(unittest.TestCase):
@@ -335,28 +350,29 @@ class TestLocalResponseNormCAPI(unittest.TestCase):
     def test_static_fp16_gpu(self):
         if paddle.fluid.core.is_compiled_with_cuda():
             place = paddle.CUDAPlace(0)
-            with paddle.static.program_guard(
-                paddle.static.Program(), paddle.static.Program()
-            ):
-                input = np.random.random([3, 3, 112, 112]).astype("float16")
-                x = paddle.static.data(
-                    name="x", shape=[3, 3, 112, 112], dtype="float16"
-                )
-                m = paddle.nn.LocalResponseNorm(size=5)
-                y = m(x)
-                exe = paddle.static.Executor(place)
-                res = exe.run(
-                    paddle.static.default_main_program(),
-                    feed={
-                        "x": input,
-                    },
-                    fetch_list=[y],
-                )
-                assert np.array_equal(res[0].shape, input.shape)
+            with paddle_static_guard():
+                with paddle.static.program_guard(
+                    paddle.static.Program(), paddle.static.Program()
+                ):
+                    input = np.random.random([3, 3, 112, 112]).astype("float16")
+                    x = paddle.static.data(
+                        name="x", shape=[3, 3, 112, 112], dtype="float16"
+                    )
+                    m = paddle.nn.LocalResponseNorm(size=5)
+                    y = m(x)
+                    exe = paddle.static.Executor(place)
+                    res = exe.run(
+                        paddle.static.default_main_program(),
+                        feed={
+                            "x": input,
+                        },
+                        fetch_list=[y],
+                    )
+                    assert np.array_equal(res[0].shape, input.shape)
 if __name__ == "__main__":
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 SIGMOID_THRESHOLD_MIN = -40.0
 SIGMOID_THRESHOLD_MAX = 13.0
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 def sigmoid_np(x):
......
@@ -19,7 +19,7 @@ import unittest
 import numpy as np
 import scipy
 import scipy.linalg
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -156,10 +156,10 @@ class TestLUOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], check_eager=True)
+        self.check_grad(['X'], ['Out'])
 # m = n 2D
......
@@ -19,7 +19,7 @@ import unittest
 import numpy as np
 import scipy
 import scipy.linalg
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -168,10 +168,10 @@ class TestLU_UnpackOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], ['L', 'U'], check_eager=True)
+        self.check_grad(['X'], ['L', 'U'])
 # m = n
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle.fluid import Program, core, program_guard from paddle.fluid import Program, core, program_guard
...@@ -148,14 +148,10 @@ class TestMarginCrossEntropyOp(OpTest): ...@@ -148,14 +148,10 @@ class TestMarginCrossEntropyOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(core.CUDAPlace(0), atol=1e-5)
core.CUDAPlace(0), atol=1e-5, check_eager=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place( self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], "Loss")
core.CUDAPlace(0), ["Logits"], "Loss", check_eager=True
)
@unittest.skipIf( @unittest.skipIf(
...@@ -172,7 +168,6 @@ class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp): ...@@ -172,7 +168,6 @@ class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp):
"Loss", "Loss",
numeric_grad_delta=5e-2, numeric_grad_delta=5e-2,
max_relative_error=5e-2, max_relative_error=5e-2,
check_eager=True,
) )
...@@ -184,9 +179,7 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp): ...@@ -184,9 +179,7 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
self.dtype = np.float16 self.dtype = np.float16
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(core.CUDAPlace(0), atol=5e-2)
core.CUDAPlace(0), atol=5e-2, check_eager=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place( self.check_grad_with_place(
...@@ -195,7 +188,6 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp): ...@@ -195,7 +188,6 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
"Loss", "Loss",
numeric_grad_delta=6e-1, numeric_grad_delta=6e-1,
max_relative_error=6e-1, max_relative_error=6e-1,
check_eager=True,
) )
...@@ -224,17 +216,13 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp): ...@@ -224,17 +216,13 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp):
class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp): class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp):
def test_check_output(self): def test_check_output(self):
try: try:
self.check_output_with_place( self.check_output_with_place(core.CPUPlace(), atol=1e-5)
core.CPUPlace(), atol=1e-5, check_eager=True
)
except RuntimeError: except RuntimeError:
pass pass
def test_check_grad(self): def test_check_grad(self):
try: try:
self.check_grad_with_place( self.check_grad_with_place(core.CPUPlace(), ["Logits"], "Loss")
core.CPUPlace(), ["Logits"], "Loss", check_eager=True
)
except RuntimeError: except RuntimeError:
pass pass
...@@ -279,63 +267,64 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): ...@@ -279,63 +267,64 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase):
self.check_static_result(place=place) self.check_static_result(place=place)
def check_static_result(self, place): def check_static_result(self, place):
with program_guard(Program(), Program()): with paddle_static_guard():
datas = np.random.uniform( with program_guard(Program(), Program()):
-0.99, 0.99, [self.batch_dim, self.feat_dim] datas = np.random.uniform(
).astype(self.dtype) -0.99, 0.99, [self.batch_dim, self.feat_dim]
datas = datas / np.sqrt( ).astype(self.dtype)
np.sum(np.square(datas), axis=1, keepdims=True) datas = datas / np.sqrt(
) np.sum(np.square(datas), axis=1, keepdims=True)
weights = np.random.uniform( )
-0.99, 0.99, [self.feat_dim, self.num_class] weights = np.random.uniform(
).astype(self.dtype) -0.99, 0.99, [self.feat_dim, self.num_class]
weights = weights / np.sqrt( ).astype(self.dtype)
np.sum(np.square(weights), axis=0, keepdims=True) weights = weights / np.sqrt(
) np.sum(np.square(weights), axis=0, keepdims=True)
)
logits_np = np.matmul(datas, weights)
labels_np = np.random.randint( logits_np = np.matmul(datas, weights)
0, self.num_class, (self.batch_dim,), dtype="int64" labels_np = np.random.randint(
) 0, self.num_class, (self.batch_dim,), dtype="int64"
)
loss_np, softmax_np = margin_cross_entropy(
logits_np, loss_np, softmax_np = margin_cross_entropy(
labels_np, logits_np,
self.axis, labels_np,
self.margin1, self.axis,
self.margin2, self.margin1,
self.margin3, self.margin2,
self.scale, self.margin3,
self.reduction, self.scale,
) self.reduction,
)
logits = paddle.static.data(
name='logits', logits = paddle.static.data(
shape=[self.batch_dim, self.num_class], name='logits',
dtype=self.dtype, shape=[self.batch_dim, self.num_class],
) dtype=self.dtype,
label = paddle.static.data( )
name='label', shape=[self.batch_dim], dtype="int64" label = paddle.static.data(
) name='label', shape=[self.batch_dim], dtype="int64"
loss, softmax = paddle.nn.functional.margin_cross_entropy( )
logits, loss, softmax = paddle.nn.functional.margin_cross_entropy(
label, logits,
margin1=self.margin1, label,
margin2=self.margin2, margin1=self.margin1,
margin3=self.margin3, margin2=self.margin2,
scale=self.scale, margin3=self.margin3,
return_softmax=True, scale=self.scale,
reduction=self.reduction, return_softmax=True,
) reduction=self.reduction,
)
exe = paddle.fluid.Executor(place)
[loss_res, softmax_res] = exe.run( exe = paddle.fluid.Executor(place)
paddle.fluid.default_main_program(), [loss_res, softmax_res] = exe.run(
feed={'logits': logits_np, 'label': labels_np}, paddle.fluid.default_main_program(),
fetch_list=[loss, softmax], feed={'logits': logits_np, 'label': labels_np},
) fetch_list=[loss, softmax],
np.testing.assert_allclose(loss_res, loss_np) )
np.testing.assert_allclose(softmax_res, softmax_np) np.testing.assert_allclose(loss_res, loss_np)
np.testing.assert_allclose(softmax_res, softmax_np)
def test_dynamic(self): def test_dynamic(self):
for place in self.places: for place in self.places:
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -83,27 +83,32 @@ class TestMarginRankLossLayer(unittest.TestCase): ...@@ -83,27 +83,32 @@ class TestMarginRankLossLayer(unittest.TestCase):
self.check_identity(place) self.check_identity(place)
def check_identity(self, place): def check_identity(self, place):
main = fluid.Program() with paddle_static_guard():
start = fluid.Program() main = fluid.Program()
with fluid.unique_name.guard(): start = fluid.Program()
with fluid.program_guard(main, start): with fluid.unique_name.guard():
label = paddle.static.data( with fluid.program_guard(main, start):
"label", (self.batch_size, 1), "float32" label = paddle.static.data(
) "label", (self.batch_size, 1), "float32"
x1 = paddle.static.data("x1", (self.batch_size, 1), "float32") )
x2 = paddle.static.data("x2", (self.batch_size, 1), "float32") x1 = paddle.static.data(
out = paddle.nn.functional.margin_ranking_loss( "x1", (self.batch_size, 1), "float32"
x1, x2, label, self.margin, 'none' )
) x2 = paddle.static.data(
"x2", (self.batch_size, 1), "float32"
exe = fluid.Executor(place) )
exe.run(start) out = paddle.nn.functional.margin_ranking_loss(
(out_np,) = exe.run( x1, x2, label, self.margin, 'none'
main, )
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out], exe = fluid.Executor(place)
) exe.run(start)
np.testing.assert_allclose(out_np, self.loss) (out_np,) = exe.run(
main,
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out],
)
np.testing.assert_allclose(out_np, self.loss)
if __name__ == '__main__': if __name__ == '__main__':
......
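For context on what the wrapped test asserts: margin ranking loss is elementwise max(0, -label * (x1 - x2) + margin). A quick eager cross-check, with values chosen here purely for illustration:

import paddle

x1 = paddle.to_tensor([2.0, 1.0])
x2 = paddle.to_tensor([1.0, 2.0])
label = paddle.to_tensor([1.0, 1.0])  # "x1 should rank above x2"
loss = paddle.nn.functional.margin_ranking_loss(
    x1, x2, label, margin=0.1, reduction='none'
)
# First pair satisfies the ranking by more than the margin -> 0;
# second pair violates it -> 1.1.
print(loss.numpy())  # [0.  1.1]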
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -40,10 +40,10 @@ class TestMaskedSelectOp(OpTest): ...@@ -40,10 +40,10 @@ class TestMaskedSelectOp(OpTest):
self.outputs = {'Y': out} self.outputs = {'Y': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Y', check_eager=True) self.check_grad(['X'], 'Y')
def init(self): def init(self):
self.shape = (50, 3) self.shape = (50, 3)
......
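As a reminder of the semantics under test: masked_select gathers the elements of x where mask is True into a 1-D tensor, which is why only 'Y' is checked as output above. A tiny eager check:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
mask = paddle.to_tensor([True, False, True, False])
print(paddle.masked_select(x, mask).numpy())  # [1. 3.]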
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestMatchMatrixTensorOp(OpTest): class TestMatchMatrixTensorOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -163,28 +163,33 @@ for dim in [4]: ...@@ -163,28 +163,33 @@ for dim in [4]:
class API_TestMm(unittest.TestCase): class API_TestMm(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program()): with paddle_static_guard():
x = paddle.static.data(name="x", shape=[2], dtype="float64") with fluid.program_guard(fluid.Program()):
y = paddle.static.data(name='y', shape=[2], dtype='float64') x = paddle.static.data(name="x", shape=[2], dtype="float64")
res = paddle.static.data(name="output", shape=[1], dtype="float64") y = paddle.static.data(name='y', shape=[2], dtype='float64')
result = paddle.mm(x, y) res = paddle.static.data(
exe = fluid.Executor(fluid.CPUPlace()) name="output", shape=[1], dtype="float64"
data1 = np.random.rand(2) )
data2 = np.random.rand(2) result = paddle.mm(x, y)
np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result]) exe = fluid.Executor(fluid.CPUPlace())
expected_result = np.matmul( data1 = np.random.rand(2)
data1.reshape(1, 2), data2.reshape(2, 1) data2 = np.random.rand(2)
) np_res = exe.run(
feed={'x': data1, 'y': data2}, fetch_list=[result]
)
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1)
)
np.testing.assert_allclose( np.testing.assert_allclose(
np_res, np_res,
expected_result, expected_result,
rtol=1e-05, rtol=1e-05,
atol=1e-05, atol=1e-05,
err_msg='two values are {}\n{}, check diff!'.format( err_msg='two values are {}\n{}, check diff!'.format(
np_res, expected_result np_res, expected_result
), ),
) )
def test_dygraph_without_out(self): def test_dygraph_without_out(self):
device = fluid.CPUPlace() device = fluid.CPUPlace()
...@@ -213,41 +218,43 @@ class Test_API_Matmul(unittest.TestCase): ...@@ -213,41 +218,43 @@ class Test_API_Matmul(unittest.TestCase):
class API_TestMmError(unittest.TestCase): class API_TestMmError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_error1(): with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error1():
name="data1", shape=[10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[10, 2], dtype="float32"
name="data2", shape=[3, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[3, 10], dtype="float32"
)
self.assertRaises(ValueError, test_error1) paddle.mm(data1, data2)
def test_error2(): self.assertRaises(ValueError, test_error1)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error2():
name="data1", shape=[-1, 10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[-1, 10, 2], dtype="float32"
name="data2", shape=[-1, 2, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[-1, 2, 10], dtype="float32"
)
test_error2() paddle.mm(data1, data2)
def test_error3(): test_error2()
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error3():
name="data1", shape=[10, 10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[10, 10, 2], dtype="float32"
name="data2", shape=[3, 2, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[3, 2, 10], dtype="float32"
)
self.assertRaises(ValueError, test_error3) paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
if __name__ == "__main__": if __name__ == "__main__":
......
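The error cases above complement ordinary usage: paddle.mm multiplies two tensors and, in static mode, validates the inner dimensions at graph-build time, raising ValueError on a mismatch. A small eager example for contrast:

import paddle

a = paddle.to_tensor([[1.0, 2.0]])    # shape [1, 2]
b = paddle.to_tensor([[3.0], [4.0]])  # shape [2, 1]
print(paddle.mm(a, b).numpy())        # [[11.]]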
...@@ -16,7 +16,7 @@ import copy ...@@ -16,7 +16,7 @@ import copy
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
...@@ -296,7 +296,7 @@ class TestMatrixNMSOp(OpTest): ...@@ -296,7 +296,7 @@ class TestMatrixNMSOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMatrixNMSOpNoOutput(TestMatrixNMSOp): class TestMatrixNMSOpNoOutput(TestMatrixNMSOp):
......
...@@ -45,7 +45,7 @@ class TestMatrixRankOP(OpTest): ...@@ -45,7 +45,7 @@ class TestMatrixRankOP(OpTest):
self.outputs = {'Out': self.out} self.outputs = {'Out': self.out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_data(self): def init_data(self):
self.x = np.eye(3, dtype=np.float32) self.x = np.eye(3, dtype=np.float32)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import check_out_dtype from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -57,10 +57,10 @@ class TestMaxOutOp(OpTest): ...@@ -57,10 +57,10 @@ class TestMaxOutOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMaxOutOpAxis0(TestMaxOutOp): class TestMaxOutOpAxis0(TestMaxOutOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def compute_mean_iou( def compute_mean_iou(
...@@ -137,7 +137,7 @@ class TestCase1(TestMeanIOUOp): ...@@ -137,7 +137,7 @@ class TestCase1(TestMeanIOUOp):
# NOTE(dev): Skip check_dygraph because Python API doesn't expose # NOTE(dev): Skip check_dygraph because Python API doesn't expose
# in_wrong_num/in_correct_num/in_mean_iou_num argument # in_wrong_num/in_correct_num/in_mean_iou_num argument
def test_check_output(self): def test_check_output(self):
self.check_output(check_dygraph=False, check_eager=False) self.check_output(check_dygraph=False)
if __name__ == '__main__': if __name__ == '__main__':
......
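One non-mechanical wrinkle, visible in the mean_iou hunk above: when an op carries auxiliary inputs/outputs that the Python API does not expose, the dygraph comparison stays disabled and only the defunct check_eager flag is removed. A hypothetical fragment of that shape (the class name is illustrative):

from eager_op_test import OpTest

class TestOpWithAuxOutputs(OpTest):  # hypothetical, for illustration
    def test_check_output(self):
        # No python_api can produce in_wrong_num/in_correct_num/
        # in_mean_iou_num, so the dygraph check remains off.
        self.check_output(check_dygraph=False)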
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, OpTestTool, convert_float_to_uint16 from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
...@@ -53,10 +53,10 @@ class TestMeanOp(OpTest): ...@@ -53,10 +53,10 @@ class TestMeanOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_checkout_grad(self): def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMeanOp_ZeroDim(OpTest): class TestMeanOp_ZeroDim(OpTest):
...@@ -68,10 +68,10 @@ class TestMeanOp_ZeroDim(OpTest): ...@@ -68,10 +68,10 @@ class TestMeanOp_ZeroDim(OpTest):
self.outputs = {'Out': np.mean(self.inputs["X"])} self.outputs = {'Out': np.mean(self.inputs["X"])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_checkout_grad(self): def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMeanOpError(unittest.TestCase): class TestMeanOpError(unittest.TestCase):
...@@ -102,7 +102,7 @@ class TestFP16MeanOp(TestMeanOp): ...@@ -102,7 +102,7 @@ class TestFP16MeanOp(TestMeanOp):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_checkout_grad(self): def test_checkout_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -126,11 +126,11 @@ class TestBF16MeanOp(TestMeanOp): ...@@ -126,11 +126,11 @@ class TestBF16MeanOp(TestMeanOp):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output_with_place(core.CPUPlace(), check_eager=True) self.check_output_with_place(core.CPUPlace())
def test_checkout_grad(self): def test_checkout_grad(self):
place = core.CPUPlace() place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False): def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
...@@ -181,14 +181,14 @@ class TestReduceMeanOp(OpTest): ...@@ -181,14 +181,14 @@ class TestReduceMeanOp(OpTest):
def test_check_output(self): def test_check_output(self):
if self.dtype != 'float16': if self.dtype != 'float16':
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
else: else:
place = paddle.CUDAPlace(0) place = paddle.CUDAPlace(0)
self.check_output_with_place(place=place, check_prim=True) self.check_output_with_place(place=place, check_prim=True)
def test_check_grad(self): def test_check_grad(self):
if self.dtype != 'float16': if self.dtype != 'float16':
self.check_grad(['X'], ['Out'], check_eager=True, check_prim=True) self.check_grad(['X'], ['Out'], check_prim=True)
else: else:
place = paddle.CUDAPlace(0) place = paddle.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import check_out_dtype from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -74,11 +74,11 @@ class TestModeOp(OpTest): ...@@ -74,11 +74,11 @@ class TestModeOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True) self.check_grad(set(['X']), 'Out')
class TestModeOpLastdim(OpTest): class TestModeOpLastdim(OpTest):
...@@ -99,11 +99,11 @@ class TestModeOpLastdim(OpTest): ...@@ -99,11 +99,11 @@ class TestModeOpLastdim(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True) self.check_grad(set(['X']), 'Out')
class TestModeOpKernels(unittest.TestCase): class TestModeOpKernels(unittest.TestCase):
......
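For reference, paddle.mode returns the most frequent value (and its index) along an axis; the values half is what the checks above compare:

import paddle

x = paddle.to_tensor([[2, 2, 3], [1, 5, 5]])
values, indices = paddle.mode(x, axis=-1)
print(values.numpy())  # [2 5]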
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def modified_huber_loss_forward(val): def modified_huber_loss_forward(val):
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
import unittest import unittest
import numpy as np import numpy as np
from eager_op_test import OpTest
from numpy.linalg import multi_dot from numpy.linalg import multi_dot
from op_test import OpTest
import paddle import paddle
...@@ -42,11 +42,11 @@ class TestMultiDotOp(OpTest): ...@@ -42,11 +42,11 @@ class TestMultiDotOp(OpTest):
self.outputs = {'Out': multi_dot([self.A, self.B])} self.outputs = {'Out': multi_dot([self.A, self.B])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
# (A*B)*C # (A*B)*C
...@@ -59,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp): ...@@ -59,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
# A*(B*C) # A*(B*C)
...@@ -74,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp): ...@@ -74,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
class TestMultiDotOp4Mat(TestMultiDotOp): class TestMultiDotOp4Mat(TestMultiDotOp):
...@@ -96,10 +96,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp): ...@@ -96,10 +96,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
self.check_grad(['x3'], 'Out', check_eager=True) self.check_grad(['x3'], 'Out')
class TestMultiDotOpFirst1D(TestMultiDotOp): class TestMultiDotOpFirst1D(TestMultiDotOp):
...@@ -153,9 +153,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat): ...@@ -153,9 +153,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat): class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
......
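paddle.linalg.multi_dot mirrors numpy.linalg.multi_dot: it chains the matrix products and picks an efficient multiplication order, which is why the tests probe each operand's gradient separately. A small shape check:

import paddle

A = paddle.rand([3, 4])
B = paddle.rand([4, 5])
C = paddle.rand([5, 2])
out = paddle.linalg.multi_dot([A, B, C])
print(out.shape)  # [3, 2]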
...@@ -16,7 +16,7 @@ import copy ...@@ -16,7 +16,7 @@ import copy
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
...@@ -797,7 +797,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op): ...@@ -797,7 +797,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op): class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op):
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.static import Program, program_guard from paddle.static import Program, program_guard
...@@ -30,10 +30,10 @@ class TestMVOp(OpTest): ...@@ -30,10 +30,10 @@ class TestMVOp(OpTest):
self.outputs = {'Out': np.dot(self.x, self.vec)} self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out', check_eager=True) self.check_grad(['X', 'Vec'], 'Out')
def init_config(self): def init_config(self):
self.x = np.random.random((2, 100)).astype("float64") self.x = np.random.random((2, 100)).astype("float64")
......
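The mv op computes a matrix-vector product, i.e. Out = X @ Vec, matching the np.dot reference above:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
vec = paddle.to_tensor([1.0, 1.0])
print(paddle.mv(x, vec).numpy())  # [3. 7.]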
...@@ -20,7 +20,7 @@ import numpy as np ...@@ -20,7 +20,7 @@ import numpy as np
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
# from op_test import OpTest # from eager_op_test import OpTest
def np_nan_to_num( def np_nan_to_num(
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -314,10 +314,10 @@ class TestNearestInterpOp(OpTest): ...@@ -314,10 +314,10 @@ class TestNearestInterpOp(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True) self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
...@@ -481,9 +481,7 @@ class TestNearestInterpOpUint8(OpTest): ...@@ -481,9 +481,7 @@ class TestNearestInterpOpUint8(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(place=core.CPUPlace(), atol=1)
place=core.CPUPlace(), atol=1, check_eager=True
)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
...@@ -631,10 +629,10 @@ class TestNearestInterpOp_attr_tensor(OpTest): ...@@ -631,10 +629,10 @@ class TestNearestInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True) self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -958,19 +958,19 @@ class TestNLLLossOp1DWithReduce(OpTest): ...@@ -958,19 +958,19 @@ class TestNLLLossOp1DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100} self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [10, 10] self.input_shape = [10, 10]
...@@ -1009,19 +1009,19 @@ class TestNLLLossOp1DNoReduce(OpTest): ...@@ -1009,19 +1009,19 @@ class TestNLLLossOp1DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100} self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [10, 10] self.input_shape = [10, 10]
...@@ -1059,19 +1059,19 @@ class TestNLLLossOp2DWithReduce(OpTest): ...@@ -1059,19 +1059,19 @@ class TestNLLLossOp2DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100} self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [2, 3, 5, 5] self.input_shape = [2, 3, 5, 5]
...@@ -1110,19 +1110,19 @@ class TestNLLLossOp2DNoReduce(OpTest): ...@@ -1110,19 +1110,19 @@ class TestNLLLossOp2DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100} self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [5, 3, 5, 5] self.input_shape = [5, 3, 5, 5]
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -90,7 +90,7 @@ class TestNMSOp(OpTest): ...@@ -90,7 +90,7 @@ class TestNMSOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -103,10 +103,10 @@ class TestFrobeniusNormOp(OpTest): ...@@ -103,10 +103,10 @@ class TestFrobeniusNormOp(OpTest):
self.outputs = {'Out': norm} self.outputs = {'Out': norm}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -127,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp): ...@@ -127,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
self.dtype = "float32" self.dtype = "float32"
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestPnormOp(OpTest): class TestPnormOp(OpTest):
...@@ -150,10 +150,10 @@ class TestPnormOp(OpTest): ...@@ -150,10 +150,10 @@ class TestPnormOp(OpTest):
self.gradient = self.calc_gradient() self.gradient = self.calc_gradient()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -349,7 +349,7 @@ class TestPnormBF16Op(OpTest): ...@@ -349,7 +349,7 @@ class TestPnormBF16Op(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=True) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -358,7 +358,6 @@ class TestPnormBF16Op(OpTest): ...@@ -358,7 +358,6 @@ class TestPnormBF16Op(OpTest):
['X'], ['X'],
'Out', 'Out',
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
) )
def init_test_case(self): def init_test_case(self):
......
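The Frobenius- and p-norm tests above reduce over the configured axes; a quick eager cross-check of both code paths:

import paddle

x = paddle.to_tensor([[3.0, 4.0]])
print(paddle.linalg.norm(x).numpy())               # 5.0 (Frobenius)
print(paddle.linalg.norm(x, p=1, axis=1).numpy())  # [7.]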
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -92,12 +92,12 @@ class TestOverlapAddOp(OpTest): ...@@ -92,12 +92,12 @@ class TestOverlapAddOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
paddle.disable_static() paddle.disable_static()
def test_check_grad_normal(self): def test_check_grad_normal(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
paddle.disable_static() paddle.disable_static()
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -77,10 +77,10 @@ class TestPad3dOp(OpTest): ...@@ -77,10 +77,10 @@ class TestPad3dOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def initTestCase(self): def initTestCase(self):
self.shape = (2, 3, 4, 5, 6) self.shape = (2, 3, 4, 5, 6)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestPadConstantLikeOp(OpTest): class TestPadConstantLikeOp(OpTest):
......
...@@ -102,7 +102,6 @@ def start_local_trainers( ...@@ -102,7 +102,6 @@ def start_local_trainers(
pod, pod,
training_script, training_script,
training_script_args, training_script_args,
eager_mode=True,
allocator_strategy="auto_growth", allocator_strategy="auto_growth",
log_dir=None, log_dir=None,
): ):
...@@ -158,7 +157,6 @@ class TestMultipleGpus(unittest.TestCase): ...@@ -158,7 +157,6 @@ class TestMultipleGpus(unittest.TestCase):
def run_mnist_2gpu( def run_mnist_2gpu(
self, self,
target_file_name, target_file_name,
eager_mode=True,
allocator_strategy="auto_growth", allocator_strategy="auto_growth",
): ):
if ( if (
...@@ -176,7 +174,6 @@ class TestMultipleGpus(unittest.TestCase): ...@@ -176,7 +174,6 @@ class TestMultipleGpus(unittest.TestCase):
procs = start_local_trainers( procs = start_local_trainers(
cluster, cluster,
pod, pod,
eager_mode=eager_mode,
allocator_strategy=allocator_strategy, allocator_strategy=allocator_strategy,
training_script=target_file_name, training_script=target_file_name,
training_script_args=[], training_script_args=[],
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def np_partial_concat(inputs, start, length): def np_partial_concat(inputs, start, length):
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestPartialSumOp(OpTest): class TestPartialSumOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def PolygonBoxRestore(input): def PolygonBoxRestore(input):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -76,7 +76,7 @@ class TestPriorBoxOp(OpTest): ...@@ -76,7 +76,7 @@ class TestPriorBoxOp(OpTest):
self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def setUp(self): def setUp(self):
self.op_type = "prior_box" self.op_type = "prior_box"
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestProximalAdagradOp(OpTest): class TestProximalAdagradOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestProximalGDOp(OpTest): class TestProximalGDOp(OpTest):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -174,10 +174,10 @@ class TestPSROIPoolOp(OpTest): ...@@ -174,10 +174,10 @@ class TestPSROIPoolOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
......
...@@ -16,7 +16,7 @@ import itertools ...@@ -16,7 +16,7 @@ import itertools
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -71,13 +71,12 @@ class TestQrOp(OpTest): ...@@ -71,13 +71,12 @@ class TestQrOp(OpTest):
return a, q, r return a, q, r
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(
['X'], ['X'],
['Q', 'R'], ['Q', 'R'],
check_eager=True,
numeric_grad_delta=1e-5, numeric_grad_delta=1e-5,
max_relative_error=1e-6, max_relative_error=1e-6,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestRandomCropOp(OpTest): class TestRandomCropOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
from functools import partial from functools import partial
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -47,7 +47,7 @@ class TestRangeOp(OpTest): ...@@ -47,7 +47,7 @@ class TestRangeOp(OpTest):
self.case = (0, 1, 0.2) self.case = (0, 1, 0.2)
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestFloatRangeOpCase0(TestRangeOp): class TestFloatRangeOpCase0(TestRangeOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -58,7 +58,7 @@ class TestRealOp(OpTest): ...@@ -58,7 +58,7 @@ class TestRealOp(OpTest):
) )
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
...@@ -66,7 +66,6 @@ class TestRealOp(OpTest): ...@@ -66,7 +66,6 @@ class TestRealOp(OpTest):
'Out', 'Out',
user_defined_grads=[self.grad_x], user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out], user_defined_grad_outputs=[self.grad_out],
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -58,10 +58,10 @@ class TestRepeatInterleaveOp(OpTest): ...@@ -58,10 +58,10 @@ class TestRepeatInterleaveOp(OpTest):
self.index_size = self.x_shape[self.dim] self.index_size = self.x_shape[self.dim]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestRepeatInterleaveOp2(OpTest): class TestRepeatInterleaveOp2(OpTest):
...@@ -96,10 +96,10 @@ class TestRepeatInterleaveOp2(OpTest): ...@@ -96,10 +96,10 @@ class TestRepeatInterleaveOp2(OpTest):
self.index_size = self.x_shape[self.dim] self.index_size = self.x_shape[self.dim]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestIndexSelectAPI(unittest.TestCase): class TestIndexSelectAPI(unittest.TestCase):
......
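repeat_interleave repeats each entry along an axis, either a fixed number of times or per an index tensor (the two OpTest cases above):

import paddle

x = paddle.to_tensor([[1, 2], [3, 4]])
print(paddle.repeat_interleave(x, 2, axis=0).numpy())
# [[1 2]
#  [1 2]
#  [3 4]
#  [3 4]]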
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import nms from test_multiclass_nms_op import nms
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -37,10 +37,10 @@ class TestReverseOp(OpTest): ...@@ -37,10 +37,10 @@ class TestReverseOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestCase0(TestReverseOp): class TestCase0(TestReverseOp):
......
...@@ -17,7 +17,7 @@ import sys ...@@ -17,7 +17,7 @@ import sys
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf) ...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_wrapper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
PreState,
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestRNNOp(OpTest): class TestRNNOp(OpTest):
def get_weight_names(self): def get_weight_names(self):
weight_names = [] weight_names = []
...@@ -44,6 +76,9 @@ class TestRNNOp(OpTest): ...@@ -44,6 +76,9 @@ class TestRNNOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_wrapper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]}
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
self.sequence_length = ( self.sequence_length = (
None None
......
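The rnn hunk is the one place this migration adds code rather than deleting it: since the test drives paddle._C_ops.rnn directly, it binds the wrapper as python_api and declares the op's output names so the harness can match them up. python_out_sig lists the top-level outputs in return order, and python_out_sig_sub_name presumably names the elements of list-valued outputs such as State. An illustrative condensed sketch of the same binding for a hypothetical two-output op (my_op and its wrapper are not real Paddle APIs):

def my_op_wrapper(x):
    # Hypothetical: forwards straight to the raw C++ op.
    return paddle._C_ops.my_op(x)

class TestMyOp(OpTest):  # hypothetical
    def setUp(self):
        self.op_type = "my_op"
        self.python_api = my_op_wrapper
        # Map the op's named outputs to the wrapper's return order.
        self.python_out_sig = ["Out", "Aux"]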
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -233,10 +233,10 @@ class TestROIAlignOp(OpTest): ...@@ -233,10 +233,10 @@ class TestROIAlignOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestROIAlignInLodOp(TestROIAlignOp): class TestROIAlignInLodOp(TestROIAlignOp):
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
from decimal import ROUND_HALF_UP, Decimal from decimal import ROUND_HALF_UP, Decimal
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -170,10 +170,10 @@ class TestROIPoolOp(OpTest): ...@@ -170,10 +170,10 @@ class TestROIPoolOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestROIPoolInLodOp(TestROIPoolOp): class TestROIPoolInLodOp(TestROIPoolOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -47,10 +47,10 @@ class TestRollOp(OpTest): ...@@ -47,10 +47,10 @@ class TestRollOp(OpTest):
self.axis = [0, -2] self.axis = [0, -2]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestRollOpCase2(TestRollOp): class TestRollOpCase2(TestRollOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import ( from test_generate_proposal_labels_op import (
_bbox_overlaps, _bbox_overlaps,
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -345,10 +345,10 @@ class RReluTest(OpTest): ...@@ -345,10 +345,10 @@ class RReluTest(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=['Noise'], check_eager=True) self.check_output(no_check_set=['Noise'])
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class RReluTrainingTest(RReluTest): class RReluTrainingTest(RReluTest):
......
...@@ -16,7 +16,7 @@ import collections ...@@ -16,7 +16,7 @@ import collections
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestSampleLogitsOp(OpTest): class TestSampleLogitsOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -91,10 +91,10 @@ class TestScatterNdAddSimpleOp(OpTest): ...@@ -91,10 +91,10 @@ class TestScatterNdAddSimpleOp(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp): class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp):
...@@ -122,14 +122,12 @@ class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp): ...@@ -122,14 +122,12 @@ class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithEmptyIndex(OpTest): class TestScatterNdAddWithEmptyIndex(OpTest):
...@@ -165,10 +163,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest): ...@@ -165,10 +163,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex): class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex):
...@@ -196,14 +194,12 @@ class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex): ...@@ -196,14 +194,12 @@ class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithHighRankSame(OpTest): class TestScatterNdAddWithHighRankSame(OpTest):
...@@ -242,10 +238,10 @@ class TestScatterNdAddWithHighRankSame(OpTest): ...@@ -242,10 +238,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame): class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame):
...@@ -273,14 +269,12 @@ class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame): ...@@ -273,14 +269,12 @@ class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithHighRankDiff(OpTest): class TestScatterNdAddWithHighRankDiff(OpTest):
...@@ -303,10 +297,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest): ...@@ -303,10 +297,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
# Test Python API # Test Python API
......
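The hunks above show the pattern this PR applies to every remaining op test: the import switches from op_test to eager_op_test, and the check_eager=True/False arguments disappear, since the eager_op_test harness always validates the eager (dygraph) path and dispatches through the test's python_api. For reference, a minimal sketch of what a migrated test looks like; the scale op, shapes, and values below are illustrative stand-ins, not part of this PR:

import unittest

import numpy as np
from eager_op_test import OpTest  # was: from op_test import OpTest

import paddle


class TestScaleOpSketch(OpTest):
    def setUp(self):
        self.op_type = "scale"
        # eager checking invokes this public API instead of the legacy tracer
        self.python_api = paddle.scale
        self.inputs = {'X': np.random.random((10, 10)).astype(np.float64)}
        self.attrs = {'scale': -2.3}
        self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

    def test_check_output(self):
        # was: self.check_output(check_eager=True)
        self.check_output()

    def test_check_grad(self):
        # was: self.check_grad(['X'], 'Out', check_eager=True)
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
    unittest.main()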
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -46,10 +46,10 @@ class TestScatterOp(OpTest): ...@@ -46,10 +46,10 @@ class TestScatterOp(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op(TestScatterOp): class TestScatterFP16Op(TestScatterOp):
...@@ -69,14 +69,12 @@ class TestScatterBF16Op(TestScatterOp): ...@@ -69,14 +69,12 @@ class TestScatterBF16Op(TestScatterOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterOp0(OpTest): class TestScatterOp0(OpTest):
...@@ -102,10 +100,10 @@ class TestScatterOp0(OpTest): ...@@ -102,10 +100,10 @@ class TestScatterOp0(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op0(TestScatterOp0): class TestScatterFP16Op0(TestScatterOp0):
...@@ -125,14 +123,12 @@ class TestScatterBF16Op0(TestScatterOp0): ...@@ -125,14 +123,12 @@ class TestScatterBF16Op0(TestScatterOp0):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterOp1(OpTest): class TestScatterOp1(OpTest):
...@@ -161,10 +157,10 @@ class TestScatterOp1(OpTest): ...@@ -161,10 +157,10 @@ class TestScatterOp1(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op1(TestScatterOp1): class TestScatterFP16Op1(TestScatterOp1):
...@@ -184,14 +180,12 @@ class TestScatterBF16Op1(TestScatterOp1): ...@@ -184,14 +180,12 @@ class TestScatterBF16Op1(TestScatterOp1):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -221,14 +215,12 @@ class TestScatterOp2(OpTest): ...@@ -221,14 +215,12 @@ class TestScatterOp2(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -280,14 +272,12 @@ class TestScatterOp3(OpTest): ...@@ -280,14 +272,12 @@ class TestScatterOp3(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -330,10 +320,10 @@ class TestScatterOp4(OpTest): ...@@ -330,10 +320,10 @@ class TestScatterOp4(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=False) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterFP16Op4(TestScatterOp4): class TestScatterFP16Op4(TestScatterOp4):
...@@ -353,14 +343,12 @@ class TestScatterBF16Op4(TestScatterOp4): ...@@ -353,14 +343,12 @@ class TestScatterBF16Op4(TestScatterOp4):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -390,14 +378,12 @@ class TestScatterOp5(OpTest): ...@@ -390,14 +378,12 @@ class TestScatterOp5(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -440,10 +426,10 @@ class TestScatterOp6(OpTest): ...@@ -440,10 +426,10 @@ class TestScatterOp6(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op6(TestScatterOp6): class TestScatterFP16Op6(TestScatterOp6):
...@@ -463,14 +449,12 @@ class TestScatterBF16Op6(TestScatterOp6): ...@@ -463,14 +449,12 @@ class TestScatterBF16Op6(TestScatterOp6):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterAPI(unittest.TestCase): class TestScatterAPI(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -42,7 +42,7 @@ class TestSearchSorted(OpTest): ...@@ -42,7 +42,7 @@ class TestSearchSorted(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_test_case(self): def init_test_case(self):
self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32") self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32")
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.static as static import paddle.static as static
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -116,10 +116,10 @@ class TestSegmentOps(OpTest): ...@@ -116,10 +116,10 @@ class TestSegmentOps(OpTest):
self.outputs = {'Out': result.astype(self.dtype)} self.outputs = {'Out': result.astype(self.dtype)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
class TestSegmentSum2(TestSegmentOps): class TestSegmentSum2(TestSegmentOps):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
from functools import reduce from functools import reduce
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -36,7 +36,7 @@ class TestShapeOp(OpTest): ...@@ -36,7 +36,7 @@ class TestShapeOp(OpTest):
self.shape = [2, 3] self.shape = [2, 3]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class case1(TestShapeOp): class case1(TestShapeOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
......
...@@ -17,7 +17,7 @@ import os ...@@ -17,7 +17,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestShuffleChannelOp(OpTest): class TestShuffleChannelOp(OpTest):
......
...@@ -17,7 +17,7 @@ import math ...@@ -17,7 +17,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestSimilarityFocusOp(OpTest): class TestSimilarityFocusOp(OpTest):
......
...@@ -17,20 +17,52 @@ import sys ...@@ -17,20 +17,52 @@ import sys
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
sys.path.append("./rnn") sys.path.append("./rnn")
from convert import get_params_for_net from rnn.convert import get_params_for_net
from rnn_numpy import SimpleRNN from rnn.rnn_numpy import SimpleRNN
random.seed(2) random.seed(2)
np.set_printoptions(threshold=np.inf) np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_warpper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
[PreState],
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestSimpleRNNOp(OpTest): class TestSimpleRNNOp(OpTest):
def get_weight_names(self): def get_weight_names(self):
weight_names = [] weight_names = []
...@@ -44,6 +76,10 @@ class TestSimpleRNNOp(OpTest): ...@@ -44,6 +76,10 @@ class TestSimpleRNNOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_warpper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden"]}
self.dtype = "float32" if core.is_compiled_with_rocm() else "float64" self.dtype = "float32" if core.is_compiled_with_rocm() else "float64"
self.sequence_length = ( self.sequence_length = (
None None
......
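When an op has no one-call public API (like the raw rnn op above), the migrated test registers a thin wrapper as python_api and declares the op's output names via python_out_sig so the harness can match the wrapper's return values to the op outputs. A minimal sketch of that registration under the same assumed conventions, using the scale op as a lightweight stand-in since rnn needs heavyweight setup:

import numpy as np
import paddle
from eager_op_test import OpTest


def scale_wrapper(x, scale=1.0):
    # hypothetical wrapper: a real test would forward to the matching
    # paddle._C_ops entry point, as rnn_warpper does above
    return paddle.scale(x, scale=scale)


class TestWrappedOpSketch(OpTest):
    def setUp(self):
        self.op_type = "scale"            # registered op name
        self.python_api = scale_wrapper   # eager path calls this wrapper
        self.python_out_sig = ["Out"]     # op output names, in order
        self.inputs = {'X': np.ones((2, 3)).astype(np.float64)}
        self.attrs = {'scale': 2.0}
        self.outputs = {'Out': self.inputs['X'] * 2.0}

    def test_check_output(self):
        self.check_output()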
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def smooth_l1_loss_forward(val, sigma2): def smooth_l1_loss_forward(val, sigma2):
...@@ -46,12 +46,10 @@ class TestSmoothL1LossOp1(OpTest): ...@@ -46,12 +46,10 @@ class TestSmoothL1LossOp1(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
self.check_grad( self.check_grad(
...@@ -59,7 +57,6 @@ class TestSmoothL1LossOp1(OpTest): ...@@ -59,7 +57,6 @@ class TestSmoothL1LossOp1(OpTest):
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set("X"), no_grad_set=set("X"),
check_eager=True,
) )
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
...@@ -68,7 +65,6 @@ class TestSmoothL1LossOp1(OpTest): ...@@ -68,7 +65,6 @@ class TestSmoothL1LossOp1(OpTest):
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set('Y'), no_grad_set=set('Y'),
check_eager=True,
) )
...@@ -96,12 +92,10 @@ class TestSmoothL1LossOp2(OpTest): ...@@ -96,12 +92,10 @@ class TestSmoothL1LossOp2(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
['X', 'Y'], 'Out', max_relative_error=0.03, check_eager=True
)
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
self.check_grad( self.check_grad(
...@@ -109,7 +103,6 @@ class TestSmoothL1LossOp2(OpTest): ...@@ -109,7 +103,6 @@ class TestSmoothL1LossOp2(OpTest):
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']), no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']),
check_eager=True,
) )
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
...@@ -118,7 +111,6 @@ class TestSmoothL1LossOp2(OpTest): ...@@ -118,7 +111,6 @@ class TestSmoothL1LossOp2(OpTest):
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']), no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']),
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_softmax_op import stable_softmax from test_softmax_op import stable_softmax
import paddle import paddle
...@@ -153,7 +153,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): ...@@ -153,7 +153,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
def test_check_output(self): def test_check_output(self):
if self.python_api is not None: if self.python_api is not None:
self.check_output(check_eager=True) self.check_output()
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
...@@ -163,7 +163,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): ...@@ -163,7 +163,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
["Logits"], ["Logits"],
"Loss", "Loss",
max_relative_error=5e-1, max_relative_error=5e-1,
check_eager=True,
) )
# HIP will fail the accuracy check when using float32 in CPU place # HIP will fail the accuracy check when using float32 in CPU place
self.check_grad(["Logits"], "Loss", max_relative_error=5e-1) self.check_grad(["Logits"], "Loss", max_relative_error=5e-1)
...@@ -173,7 +172,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): ...@@ -173,7 +172,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
["Logits"], ["Logits"],
"Loss", "Loss",
numeric_grad_delta=0.001, numeric_grad_delta=0.001,
check_eager=True,
) )
self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001) self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001)
...@@ -510,14 +508,12 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): ...@@ -510,14 +508,12 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
def test_check_output(self): def test_check_output(self):
if self.python_api is not None: if self.python_api is not None:
self.check_output(atol=1e-2, check_eager=True) self.check_output(atol=1e-2)
self.check_output(atol=1e-2) self.check_output(atol=1e-2)
def test_check_grad(self): def test_check_grad(self):
if self.python_api is not None: if self.python_api is not None:
self.check_grad( self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1) self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
...@@ -537,9 +533,7 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16( ...@@ -537,9 +533,7 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
def test_check_grad(self): def test_check_grad(self):
if self.python_api is not None: if self.python_api is not None:
self.check_grad( self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1) self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
...@@ -562,20 +556,18 @@ class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp): ...@@ -562,20 +556,18 @@ class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp):
def test_check_output(self): def test_check_output(self):
if self.python_api is not None: if self.python_api is not None:
self.check_output(check_eager=True) self.check_output()
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_rocm(): if core.is_compiled_with_rocm():
# HIP will fail the accuracy check when using float32 in CPU place # HIP will fail the accuracy check when using float32 in CPU place
if self.python_api is not None: if self.python_api is not None:
self.check_grad( self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1) self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
else: else:
if self.python_api is not None: if self.python_api is not None:
self.check_grad(["Logits"], "Loss", check_eager=True) self.check_grad(["Logits"], "Loss")
self.check_grad(["Logits"], "Loss") self.check_grad(["Logits"], "Loss")
......
...@@ -21,7 +21,7 @@ import paddle ...@@ -21,7 +21,7 @@ import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
sys.path.append("..") sys.path.append("..")
from op_test import OpTest from eager_op_test import OpTest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
...@@ -50,10 +50,10 @@ class TestSolveOp(OpTest): ...@@ -50,10 +50,10 @@ class TestSolveOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
# x broadcast + 3D batch case # x broadcast + 3D batch case
...@@ -71,12 +71,10 @@ class TestSolveOpBatched_case0(OpTest): ...@@ -71,12 +71,10 @@ class TestSolveOpBatched_case0(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-1)
['X', 'Y'], 'Out', max_relative_error=1e-1, check_eager=True
)
# 3D batch + y vector case # 3D batch + y vector case
...@@ -94,12 +92,10 @@ class TestSolveOpBatched_case1(OpTest): ...@@ -94,12 +92,10 @@ class TestSolveOpBatched_case1(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
# 3D batch + y broadcast case # 3D batch + y broadcast case
...@@ -117,12 +113,10 @@ class TestSolveOpBatched_case2(OpTest): ...@@ -117,12 +113,10 @@ class TestSolveOpBatched_case2(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
# x broadcast + 3D batch case # x broadcast + 3D batch case
...@@ -140,12 +134,10 @@ class TestSolveOpBatched_case3(OpTest): ...@@ -140,12 +134,10 @@ class TestSolveOpBatched_case3(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
# 3D normal batch case # 3D normal batch case
...@@ -163,10 +155,10 @@ class TestSolveOpBatched_case4(OpTest): ...@@ -163,10 +155,10 @@ class TestSolveOpBatched_case4(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
# 4D normal batch case # 4D normal batch case
...@@ -184,10 +176,10 @@ class TestSolveOpBatched_case5(OpTest): ...@@ -184,10 +176,10 @@ class TestSolveOpBatched_case5(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
# 4D batch + y broadcast case # 4D batch + y broadcast case
...@@ -205,10 +197,10 @@ class TestSolveOpBatched_case6(OpTest): ...@@ -205,10 +197,10 @@ class TestSolveOpBatched_case6(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
# 5D normal batch case # 5D normal batch case
...@@ -226,12 +218,10 @@ class TestSolveOpBatched_case7(OpTest): ...@@ -226,12 +218,10 @@ class TestSolveOpBatched_case7(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
# 5D batch + y broadcast case # 5D batch + y broadcast case
...@@ -249,12 +239,10 @@ class TestSolveOpBatched_case8(OpTest): ...@@ -249,12 +239,10 @@ class TestSolveOpBatched_case8(OpTest):
self.outputs = {'Out': result} self.outputs = {'Out': result}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
class TestSolveOpError(unittest.TestCase): class TestSolveOpError(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -18,7 +18,7 @@ import re ...@@ -18,7 +18,7 @@ import re
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def calculate_sparse_momentum_by_numpy( def calculate_sparse_momentum_by_numpy(
...@@ -175,9 +175,7 @@ class TestSparseMomentumOp(OpTest): ...@@ -175,9 +175,7 @@ class TestSparseMomentumOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output( self.check_output(atol=5e-3 if self.multi_precision else 1e-5)
atol=5e-3 if self.multi_precision else 1e-5, check_eager=True
)
class TestSparseMomentumOpDtype1(TestSparseMomentumOp): class TestSparseMomentumOpDtype1(TestSparseMomentumOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_pool2d_op import avg_pool2D_forward_naive, max_pool2D_forward_naive from test_pool2d_op import avg_pool2D_forward_naive, max_pool2D_forward_naive
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestSquaredL2DistanceOp_f0(OpTest): class TestSquaredL2DistanceOp_f0(OpTest):
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
import unittest import unittest
import numpy as np import numpy as np
from eager_op_test import OpTest
from numpy import linalg as LA from numpy import linalg as LA
from op_test import OpTest
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
...@@ -84,14 +84,13 @@ class TestL2LossOp(OpTest): ...@@ -84,14 +84,13 @@ class TestL2LossOp(OpTest):
self.outputs = {'Out': np.square(LA.norm(X))} self.outputs = {'Out': np.square(LA.norm(X))}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
['X'], ['X'],
'Out', 'Out',
max_relative_error=self.max_relative_error, max_relative_error=self.max_relative_error,
check_eager=True,
) )
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
...@@ -44,12 +44,10 @@ class TestSqueezeOp(OpTest): ...@@ -44,12 +44,10 @@ class TestSqueezeOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output( self.check_output(no_check_set=['XShape'], check_prim=True)
no_check_set=['XShape'], check_eager=True, check_prim=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True, check_prim=True) self.check_grad(["X"], "Out", check_prim=True)
def init_test_case(self): def init_test_case(self):
self.ori_shape = (1, 3, 1, 40) self.ori_shape = (1, 3, 1, 40)
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -63,12 +63,10 @@ class TestStackOpBase(OpTest): ...@@ -63,12 +63,10 @@ class TestStackOpBase(OpTest):
self.attrs = {'axis': self.axis} self.attrs = {'axis': self.axis}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(self.get_x_names(), 'Y', check_prim=True)
self.get_x_names(), 'Y', check_eager=True, check_prim=True
)
class TestStackOp1(TestStackOpBase): class TestStackOp1(TestStackOpBase):
...@@ -149,11 +147,11 @@ class TestStackBF16Op(OpTest): ...@@ -149,11 +147,11 @@ class TestStackBF16Op(OpTest):
self.attrs = {'axis': self.axis} self.attrs = {'axis': self.axis}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
def test_check_grad(self): def test_check_grad(self):
# concat_grad does not support bfloat16 dtype, skip check_prim # concat_grad does not support bfloat16 dtype, skip check_prim
self.check_grad(self.get_x_names(), 'Y', check_eager=True) self.check_grad(self.get_x_names(), 'Y')
class TestStackAPIWithLoDTensorArray(unittest.TestCase): class TestStackAPIWithLoDTensorArray(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, skip_check_grad_ci from eager_op_test import OpTest, skip_check_grad_ci
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -51,7 +51,7 @@ class TestSvdOp(OpTest): ...@@ -51,7 +51,7 @@ class TestSvdOp(OpTest):
self._output_data = np.linalg.svd(self._input_data) self._output_data = np.linalg.svd(self._input_data)
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=['U', 'VH'], check_eager=True) self.check_output(no_check_set=['U', 'VH'])
def test_svd_forward(self): def test_svd_forward(self):
"""u matmul diag(s) matmul vt must become X""" """u matmul diag(s) matmul vt must become X"""
...@@ -71,19 +71,13 @@ class TestSvdOp(OpTest): ...@@ -71,19 +71,13 @@ class TestSvdOp(OpTest):
paddle.enable_static() paddle.enable_static()
def check_S_grad(self): def check_S_grad(self):
self.check_grad( self.check_grad(['X'], ['S'], numeric_grad_delta=0.001)
['X'], ['S'], numeric_grad_delta=0.001, check_eager=True
)
def check_U_grad(self): def check_U_grad(self):
self.check_grad( self.check_grad(['X'], ['U'], numeric_grad_delta=0.001)
['X'], ['U'], numeric_grad_delta=0.001, check_eager=True
)
def check_V_grad(self): def check_V_grad(self):
self.check_grad( self.check_grad(['X'], ['VH'], numeric_grad_delta=0.001)
['X'], ['VH'], numeric_grad_delta=0.001, check_eager=True
)
def test_check_grad(self): def test_check_grad(self):
""" """
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -140,31 +140,45 @@ class TestCase4(TestTDMChildOp): ...@@ -140,31 +140,45 @@ class TestCase4(TestTDMChildOp):
class TestTDMChildShape(unittest.TestCase): class TestTDMChildShape(unittest.TestCase):
def test_shape(self): def test_shape(self):
x = paddle.static.data( with paddle_static_guard():
name='x', shape=[-1, 1], dtype='int32', lod_level=1 x = paddle.static.data(
) name='x', shape=[-1, 1], dtype='int32', lod_level=1
tdm_tree_info = create_tdm_tree() )
tree_info_np = np.array(tdm_tree_info).astype('int32') tdm_tree_info = create_tdm_tree()
tree_info_np = np.array(tdm_tree_info).astype('int32')
child, leaf_mask = fluid.contrib.layers.tdm_child(
x=x, child, leaf_mask = fluid.contrib.layers.tdm_child(
node_nums=26, x=x,
child_nums=2, node_nums=26,
param_attr=fluid.ParamAttr( child_nums=2,
initializer=paddle.nn.initializer.Assign(tree_info_np) param_attr=fluid.ParamAttr(
), initializer=paddle.nn.initializer.Assign(tree_info_np)
) ),
)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place) place = fluid.CPUPlace()
exe.run(fluid.default_startup_program()) exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array( feed = {
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12]] 'x': np.array(
).astype('int32') [
} [1],
exe.run(feed=feed) [2],
[3],
[4],
[5],
[6],
[7],
[8],
[9],
[10],
[11],
[12],
]
).astype('int32')
}
exe.run(feed=feed)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -267,65 +267,66 @@ class TestCase7(TestTDMSamplerOp): ...@@ -267,65 +267,66 @@ class TestCase7(TestTDMSamplerOp):
class TestTDMSamplerShape(unittest.TestCase): class TestTDMSamplerShape(unittest.TestCase):
def test_shape(self): def test_shape(self):
x = paddle.static.data( with paddle_static_guard():
name='x', shape=[-1, 1], dtype='int32', lod_level=1 x = paddle.static.data(
) name='x', shape=[-1, 1], dtype='int32', lod_level=1
tdm_tree_travel = create_tdm_travel() )
tdm_tree_layer = create_tdm_layer() tdm_tree_travel = create_tdm_travel()
layer_node_num_list = [len(i) for i in tdm_tree_layer] tdm_tree_layer = create_tdm_layer()
layer_node_num_list = [len(i) for i in tdm_tree_layer]
tree_layer_flat = []
for layer_idx, layer_node in enumerate(layer_node_num_list): tree_layer_flat = []
tree_layer_flat += tdm_tree_layer[layer_idx] for layer_idx, layer_node in enumerate(layer_node_num_list):
tree_layer_flat += tdm_tree_layer[layer_idx]
travel_array = np.array(tdm_tree_travel).astype('int32')
layer_array = np.array(tree_layer_flat).astype('int32') travel_array = np.array(tdm_tree_travel).astype('int32')
layer_array = np.array(tree_layer_flat).astype('int32')
neg_samples_num_list = [1, 2, 3, 4]
leaf_node_num = 13 neg_samples_num_list = [1, 2, 3, 4]
leaf_node_num = 13
sample, label, mask = fluid.contrib.layers.tdm_sampler(
x, sample, label, mask = fluid.contrib.layers.tdm_sampler(
neg_samples_num_list, x,
layer_node_num_list, neg_samples_num_list,
leaf_node_num, layer_node_num_list,
tree_travel_attr=fluid.ParamAttr( leaf_node_num,
initializer=paddle.nn.initializer.Assign(travel_array) tree_travel_attr=fluid.ParamAttr(
), initializer=paddle.nn.initializer.Assign(travel_array)
tree_layer_attr=fluid.ParamAttr( ),
initializer=paddle.nn.initializer.Assign(layer_array) tree_layer_attr=fluid.ParamAttr(
), initializer=paddle.nn.initializer.Assign(layer_array)
output_positive=True, ),
output_list=True, output_positive=True,
seed=0, output_list=True,
tree_dtype='int32', seed=0,
dtype='int32', tree_dtype='int32',
) dtype='int32',
)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place=place) exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
feed = { feed = {
'x': np.array( 'x': np.array(
[ [
[0], [0],
[1], [1],
[2], [2],
[3], [3],
[4], [4],
[5], [5],
[6], [6],
[7], [7],
[8], [8],
[9], [9],
[10], [10],
[11], [11],
[12], [12],
] ]
).astype('int32') ).astype('int32')
} }
exe.run(feed=feed) exe.run(feed=feed)
if __name__ == "__main__": if __name__ == "__main__":
......
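The two TDM tests above also gain a paddle_static_guard() context: eager_op_test runs the suite with dynamic graph mode enabled globally, so any test body that builds a static Program is wrapped in the guard, which switches to static mode on entry and restores dynamic mode on exit. A minimal standalone sketch of the same idiom, with a trivial program in place of the tdm layers:

import numpy as np
import paddle
from eager_op_test import paddle_static_guard


def run_static_square():
    # Build and run a tiny static program without leaving static
    # mode switched on for the rest of the (eager-mode) test suite.
    with paddle_static_guard():
        x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')
        y = paddle.square(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(paddle.static.default_startup_program())
        (out,) = exe.run(
            feed={'x': np.ones((2, 4), dtype='float32')},
            fetch_list=[y],
        )
    return out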
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
from math import exp, log from math import exp, log
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from scipy.special import logit from scipy.special import logit
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -69,10 +69,10 @@ class TestTemporalShift(OpTest): ...@@ -69,10 +69,10 @@ class TestTemporalShift(OpTest):
self.dtype = 'float64' self.dtype = 'float64'
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_ignore_uv(self): def test_check_grad_ignore_uv(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def initTestCase(self): def initTestCase(self):
self.x_shape = (6, 4, 4, 4) self.x_shape = (6, 4, 4, 4)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -59,10 +59,10 @@ class TestTopkOp(OpTest): ...@@ -59,10 +59,10 @@ class TestTopkOp(OpTest):
self.outputs = {'Out': output, 'Indices': indices} self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True) self.check_grad(['X'], 'Out', check_prim=True)
class TestTopkOp1(TestTopkOp): class TestTopkOp1(TestTopkOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -31,10 +31,10 @@ class TestTraceOp(OpTest): ...@@ -31,10 +31,10 @@ class TestTraceOp(OpTest):
self.outputs = {'Out': self.target} self.outputs = {'Out': self.target}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['Input'], 'Out', check_eager=True) self.check_grad(['Input'], 'Out')
def init_config(self): def init_config(self):
self.case = np.random.randn(20, 6).astype('float64') self.case = np.random.randn(20, 6).astype('float64')
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def collect_node_patch(og, max_depth): def collect_node_patch(og, max_depth):
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
import numpy as np import numpy as np
sys.path.append("..") sys.path.append("..")
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -64,10 +64,10 @@ class TestTriangularSolveOp(OpTest): ...@@ -64,10 +64,10 @@ class TestTriangularSolveOp(OpTest):
self.outputs = {'Out': self.output} self.outputs = {'Out': self.output}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True) self.check_grad(['X', 'Y'], 'Out')
# 2D(broadcast) + 3D, test 'transpose' # 2D(broadcast) + 3D, test 'transpose'
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -45,10 +45,10 @@ class TrilTriuOpDefaultTest(OpTest): ...@@ -45,10 +45,10 @@ class TrilTriuOpDefaultTest(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def initTestCase(self): def initTestCase(self):
self.real_op_type = np.random.choice(['triu', 'tril']) self.real_op_type = np.random.choice(['triu', 'tril'])
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -204,8 +204,6 @@ class TestTrilinearInterpOp(OpTest): ...@@ -204,8 +204,6 @@ class TestTrilinearInterpOp(OpTest):
self.init_test_case() self.init_test_case()
self.op_type = "trilinear_interp_v2" self.op_type = "trilinear_interp_v2"
# NOTE(dev): some AsDispensable input is not used under imperative mode. # NOTE(dev): some AsDispensable input is not used under imperative mode.
# Skip check_eager while found them in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float32") input_np = np.random.random(self.input_shape).astype("float32")
scale_w = 0 scale_w = 0
...@@ -255,11 +253,9 @@ class TestTrilinearInterpOp(OpTest): ...@@ -255,11 +253,9 @@ class TestTrilinearInterpOp(OpTest):
self.inputs = {'X': input_np} self.inputs = {'X': input_np}
if self.out_size is not None: if self.out_size is not None:
self.inputs['OutSize'] = self.out_size self.inputs['OutSize'] = self.out_size
self.check_eager = False if self.actual_shape is not None:
if self.actual_shape is not None: self.inputs['OutSize'] = self.actual_shape
self.inputs['OutSize'] = self.actual_shape # the c++ end treats NCDHW the same way as NCHW
self.check_eager = False
# the c++ end treats NCDHW the same way as NCHW if self.data_layout == 'NCDHW':
if self.data_layout == 'NCDHW': if self.data_layout == 'NCDHW':
data_layout = 'NCHW' data_layout = 'NCHW'
else: else:
...@@ -283,12 +279,10 @@ class TestTrilinearInterpOp(OpTest): ...@@ -283,12 +279,10 @@ class TestTrilinearInterpOp(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=self.check_eager) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(['X'], 'Out', in_place=True)
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'trilinear' self.interp_method = 'trilinear'
...@@ -435,7 +429,6 @@ class TestTrilinearInterpOpUint8(OpTest): ...@@ -435,7 +429,6 @@ class TestTrilinearInterpOpUint8(OpTest):
self.actual_shape = None self.actual_shape = None
self.init_test_case() self.init_test_case()
self.op_type = "trilinear_interp_v2" self.op_type = "trilinear_interp_v2"
self.check_eager = True
input_np = np.random.randint( input_np = np.random.randint(
low=0, high=256, size=self.input_shape low=0, high=256, size=self.input_shape
).astype("uint8") ).astype("uint8")
...@@ -474,7 +467,6 @@ class TestTrilinearInterpOpUint8(OpTest): ...@@ -474,7 +467,6 @@ class TestTrilinearInterpOpUint8(OpTest):
self.inputs = {'X': input_np} self.inputs = {'X': input_np}
if self.out_size is not None: if self.out_size is not None:
self.inputs['OutSize'] = self.out_size self.inputs['OutSize'] = self.out_size
self.check_eager = False
self.attrs = { self.attrs = {
'out_d': self.out_d, 'out_d': self.out_d,
...@@ -494,9 +486,7 @@ class TestTrilinearInterpOpUint8(OpTest): ...@@ -494,9 +486,7 @@ class TestTrilinearInterpOpUint8(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(place=core.CPUPlace(), atol=1)
place=core.CPUPlace(), atol=1, check_eager=self.check_eager
)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'trilinear' self.interp_method = 'trilinear'
...@@ -607,7 +597,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -607,7 +597,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
self.actual_shape = None self.actual_shape = None
self.init_test_case() self.init_test_case()
self.op_type = "trilinear_interp_v2" self.op_type = "trilinear_interp_v2"
self.check_eager = True
self.shape_by_1Dtensor = False self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False self.scale_by_1Dtensor = False
self.attrs = { self.attrs = {
...@@ -641,7 +630,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -641,7 +630,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
if self.shape_by_1Dtensor: if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size self.inputs['OutSize'] = self.out_size
self.check_eager = False
elif self.out_size is not None: elif self.out_size is not None:
size_tensor = [] size_tensor = []
for index, ele in enumerate(self.out_size): for index, ele in enumerate(self.out_size):
...@@ -649,7 +637,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -649,7 +637,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones((1)).astype('int32') * ele)
) )
self.inputs['SizeTensor'] = size_tensor self.inputs['SizeTensor'] = size_tensor
self.check_eager = False
self.attrs['out_d'] = self.out_d self.attrs['out_d'] = self.out_d
self.attrs['out_h'] = self.out_h self.attrs['out_h'] = self.out_h
...@@ -677,12 +664,10 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -677,12 +664,10 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=self.check_eager) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(['X'], 'Out', in_place=True)
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'trilinear' self.interp_method = 'trilinear'
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -35,10 +35,10 @@ class TestTruncOp(OpTest): ...@@ -35,10 +35,10 @@ class TestTruncOp(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5, check_eager=True) self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)
class TestFloatTruncOp(TestTruncOp): class TestFloatTruncOp(TestTruncOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_uint16_to_float from eager_op_test import OpTest, convert_uint16_to_float
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_uint16_to_float from eager_op_test import OpTest, convert_uint16_to_float
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -99,7 +99,7 @@ class TestUniqueConsecutiveOp(OpTest): ...@@ -99,7 +99,7 @@ class TestUniqueConsecutiveOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp):
...@@ -347,7 +347,7 @@ class TestUniqueConsecutiveEmptyInput(OpTest): ...@@ -347,7 +347,7 @@ class TestUniqueConsecutiveEmptyInput(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
...@@ -143,10 +143,10 @@ class TestUnpool3DOp(OpTest): ...@@ -143,10 +143,10 @@ class TestUnpool3DOp(OpTest):
self.outputs = {'Out': output.astype('float64')} self.outputs = {'Out': output.astype('float64')}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.unpool3d_forward_naive = unpool3dmax_forward_naive self.unpool3d_forward_naive = unpool3dmax_forward_naive
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
...@@ -130,10 +130,10 @@ class TestUnpoolOp(OpTest): ...@@ -130,10 +130,10 @@ class TestUnpoolOp(OpTest):
self.outputs = {'Out': output.astype('float64')} self.outputs = {'Out': output.astype('float64')}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.unpool2d_forward_naive = unpool2dmax_forward_naive self.unpool2d_forward_naive = unpool2dmax_forward_naive
......
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -39,12 +39,10 @@ class TestUnsqueezeOp(OpTest):
         self.prim_op_type = "comp"
     def test_check_output(self):
-        self.check_output(
-            no_check_set=["XShape"], check_eager=True, check_prim=True
-        )
+        self.check_output(no_check_set=["XShape"], check_prim=True)
     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=True)
+        self.check_grad(["X"], "Out")
     def init_test_case(self):
         self.ori_shape = (3, 40)
@@ -136,10 +134,10 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
         }
     def test_check_output(self):
-        self.check_output(no_check_set=["XShape"], check_eager=True)
+        self.check_output(no_check_set=["XShape"])
     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=True)
+        self.check_grad(["X"], "Out")
     def init_test_case(self):
         self.ori_shape = (20, 5)
@@ -197,10 +195,10 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
         }
     def test_check_output(self):
-        self.check_output(no_check_set=["XShape"], check_eager=True)
+        self.check_output(no_check_set=["XShape"])
     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=True)
+        self.check_grad(["X"], "Out")
     def init_test_case(self):
         self.ori_shape = (20, 5)
...
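The unsqueeze tests above keep no_check_set=["XShape"] through the migration: unsqueeze2 emits an auxiliary XShape output that only records the input shape for the backward pass and is never filled with meaningful data, so it has to be excluded from the numeric comparison. A hedged sketch of that pattern (class name, shapes, and the random XShape placeholder are illustrative, not taken from this diff):

    import numpy as np
    from eager_op_test import OpTest

    class TestUnsqueeze2Sketch(OpTest):
        def setUp(self):
            self.op_type = "unsqueeze2"
            x = np.random.random((3, 40)).astype("float64")
            self.inputs = {"X": x}
            self.attrs = {"axes": [1, 2]}
            self.outputs = {
                # inserting axes 1 and 2 turns (3, 40) into (3, 1, 1, 40)
                "Out": x.reshape((3, 1, 1, 40)),
                # bookkeeping output: contents are irrelevant, so it is
                # skipped via no_check_set below
                "XShape": np.random.random(x.shape).astype("float64"),
            }

        def test_check_output(self):
            self.check_output(no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad(["X"], "Out")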
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -58,10 +58,10 @@ class TestUnStackOpBase(OpTest):
         self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], self.get_y_names(), check_eager=True)
+        self.check_grad(['X'], self.get_y_names())
 class TestStackOp3(TestUnStackOpBase):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 import paddle.fluid as fluid
@@ -95,7 +95,7 @@ class TestUpdateLossScalingOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(no_check_set=['Out'], check_eager=True)
+        self.check_output(no_check_set=['Out'])
 class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
@@ -132,188 +132,199 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestUpdateLossScalingLayer(unittest.TestCase):
     def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()):
-        a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
-        b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
-        x = [a, b]
-        found_inf = paddle.static.data(
-            name="found_inf", shape=[1], dtype='bool'
-        )
-        prev_loss_scaling = paddle.static.data(
-            name="prev_loss_scaling", shape=[1], dtype='float32'
-        )
-        num_good_steps = paddle.static.data(
-            name="num_good_steps", shape=[1], dtype='int32'
-        )
-        num_bad_steps = paddle.static.data(
-            name="num_bad_steps", shape=[1], dtype='int32'
-        )
-        a_v = np.random.random([1024, 1024]).astype('float32')
-        b_v = np.random.random([512, 128]).astype('float32')
-        found_inf_v = np.array([False]).astype('bool')
-        prev_loss_scaling_v = np.array([2048]).astype('float32')
-        num_good_steps_v = np.array([999], dtype=np.int32)
-        num_bad_steps_v = np.array([1], dtype=np.int32)
-        incr_every_n_steps = 1000
-        decr_every_n_nan_or_inf = 2
-        incr_ratio = 2
-        decr_ratio = 0.8
-        result = amp_nn.update_loss_scaling(
-            x,
-            found_inf,
-            prev_loss_scaling,
-            num_good_steps,
-            num_bad_steps,
-            incr_every_n_steps,
-            decr_every_n_nan_or_inf,
-            incr_ratio,
-            decr_ratio,
-            name="update_loss_scaling",
-        )
-        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        with fluid.scope_guard(scope):
-            exe.run(fluid.default_startup_program())
-            result_v = exe.run(
-                feed={
-                    'a': a_v,
-                    'b': b_v,
-                    'found_inf': found_inf_v,
-                    'prev_loss_scaling': prev_loss_scaling_v,
-                    'num_good_steps': num_good_steps_v,
-                    'num_bad_steps': num_bad_steps_v,
-                },
-                fetch_list=[
-                    result,
-                    x,
-                    found_inf,
-                    prev_loss_scaling,
-                    num_good_steps,
-                    num_bad_steps,
-                ],
-            )
-        assert np.array_equal(result_v[0], a_v)
-        assert np.array_equal(result_v[1], b_v)
-        assert np.array_equal(result_v[0], result_v[2])
-        assert np.array_equal(result_v[1], result_v[3])
-        assert np.array_equal(result_v[4], found_inf_v)
-        assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
-        assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
-        assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
+        with paddle_static_guard():
+            a = paddle.static.data(
+                name="a", shape=[1024, 1024], dtype='float32'
+            )
+            b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
+            x = [a, b]
+            found_inf = paddle.static.data(
+                name="found_inf", shape=[1], dtype='bool'
+            )
+            prev_loss_scaling = paddle.static.data(
+                name="prev_loss_scaling", shape=[1], dtype='float32'
+            )
+            num_good_steps = paddle.static.data(
+                name="num_good_steps", shape=[1], dtype='int32'
+            )
+            num_bad_steps = paddle.static.data(
+                name="num_bad_steps", shape=[1], dtype='int32'
+            )
+            a_v = np.random.random([1024, 1024]).astype('float32')
+            b_v = np.random.random([512, 128]).astype('float32')
+            found_inf_v = np.array([False]).astype('bool')
+            prev_loss_scaling_v = np.array([2048]).astype('float32')
+            num_good_steps_v = np.array([999], dtype=np.int32)
+            num_bad_steps_v = np.array([1], dtype=np.int32)
+            incr_every_n_steps = 1000
+            decr_every_n_nan_or_inf = 2
+            incr_ratio = 2
+            decr_ratio = 0.8
+            result = amp_nn.update_loss_scaling(
+                x,
+                found_inf,
+                prev_loss_scaling,
+                num_good_steps,
+                num_bad_steps,
+                incr_every_n_steps,
+                decr_every_n_nan_or_inf,
+                incr_ratio,
+                decr_ratio,
+                name="update_loss_scaling",
+            )
+            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            with fluid.scope_guard(scope):
+                exe.run(fluid.default_startup_program())
+                result_v = exe.run(
+                    feed={
+                        'a': a_v,
+                        'b': b_v,
+                        'found_inf': found_inf_v,
+                        'prev_loss_scaling': prev_loss_scaling_v,
+                        'num_good_steps': num_good_steps_v,
+                        'num_bad_steps': num_bad_steps_v,
+                    },
+                    fetch_list=[
+                        result,
+                        x,
+                        found_inf,
+                        prev_loss_scaling,
+                        num_good_steps,
+                        num_bad_steps,
+                    ],
+                )
+            assert np.array_equal(result_v[0], a_v)
+            assert np.array_equal(result_v[1], b_v)
+            assert np.array_equal(result_v[0], result_v[2])
+            assert np.array_equal(result_v[1], result_v[3])
+            assert np.array_equal(result_v[4], found_inf_v)
+            assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
+            assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
+            assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
     def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
-        a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
-        b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
-        x = [a, b]
-        found_inf = paddle.static.data(
-            name="found_inf", shape=[1], dtype='bool'
-        )
-        prev_loss_scaling = paddle.static.data(
-            name="prev_loss_scaling", shape=[1], dtype='float32'
-        )
-        num_good_steps = paddle.static.data(
-            name="num_good_steps", shape=[1], dtype='int32'
-        )
-        num_bad_steps = paddle.static.data(
-            name="num_bad_steps", shape=[1], dtype='int32'
-        )
-        a_v = np.random.random([1024, 1024]).astype('float32')
-        b_v = np.random.random([512, 128]).astype('float32')
-        i = np.random.randint(0, 1024, 1)
-        j = np.random.randint(0, 1024, 1)
-        a_v[i[0]][j[0]] = np.inf
-        found_inf_v = np.array([True]).astype('bool')
-        prev_loss_scaling_v = np.array([2048]).astype('float32')
-        num_good_steps_v = np.array([999], dtype=np.int32)
-        num_bad_steps_v = np.array([1], dtype=np.int32)
-        incr_every_n_steps = 1000
-        decr_every_n_nan_or_inf = 2
-        incr_ratio = 2
-        decr_ratio = 0.8
-        result = amp_nn.update_loss_scaling(
-            x,
-            found_inf,
-            prev_loss_scaling,
-            num_good_steps,
-            num_bad_steps,
-            incr_every_n_steps,
-            decr_every_n_nan_or_inf,
-            incr_ratio,
-            decr_ratio,
-            name="update_loss_scaling",
-        )
-        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        with fluid.scope_guard(scope):
-            exe.run(fluid.default_startup_program())
-            result_v = exe.run(
-                feed={
-                    'a': a_v,
-                    'b': b_v,
-                    'found_inf': found_inf_v,
-                    'prev_loss_scaling': prev_loss_scaling_v,
-                    'num_good_steps': num_good_steps_v,
-                    'num_bad_steps': num_bad_steps_v,
-                },
-                fetch_list=[
-                    result,
-                    x,
-                    found_inf,
-                    prev_loss_scaling,
-                    num_good_steps,
-                    num_bad_steps,
-                ],
-            )
-        assert np.array_equal(result_v[0], np.zeros_like(a_v))
-        assert np.array_equal(result_v[1], np.zeros_like(b_v))
-        assert np.array_equal(result_v[2], np.zeros_like(a_v))
-        assert np.array_equal(result_v[3], np.zeros_like(b_v))
-        assert np.array_equal(result_v[4], found_inf_v)
-        assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
-        assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
-        assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
+        with paddle_static_guard():
+            a = paddle.static.data(
+                name="a", shape=[1024, 1024], dtype='float32'
+            )
+            b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
+            x = [a, b]
+            found_inf = paddle.static.data(
+                name="found_inf", shape=[1], dtype='bool'
+            )
+            prev_loss_scaling = paddle.static.data(
+                name="prev_loss_scaling", shape=[1], dtype='float32'
+            )
+            num_good_steps = paddle.static.data(
+                name="num_good_steps", shape=[1], dtype='int32'
+            )
+            num_bad_steps = paddle.static.data(
+                name="num_bad_steps", shape=[1], dtype='int32'
+            )
+            a_v = np.random.random([1024, 1024]).astype('float32')
+            b_v = np.random.random([512, 128]).astype('float32')
+            i = np.random.randint(0, 1024, 1)
+            j = np.random.randint(0, 1024, 1)
+            a_v[i[0]][j[0]] = np.inf
+            found_inf_v = np.array([True]).astype('bool')
+            prev_loss_scaling_v = np.array([2048]).astype('float32')
+            num_good_steps_v = np.array([999], dtype=np.int32)
+            num_bad_steps_v = np.array([1], dtype=np.int32)
+            incr_every_n_steps = 1000
+            decr_every_n_nan_or_inf = 2
+            incr_ratio = 2
+            decr_ratio = 0.8
+            result = amp_nn.update_loss_scaling(
+                x,
+                found_inf,
+                prev_loss_scaling,
+                num_good_steps,
+                num_bad_steps,
+                incr_every_n_steps,
+                decr_every_n_nan_or_inf,
+                incr_ratio,
+                decr_ratio,
+                name="update_loss_scaling",
+            )
+            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            with fluid.scope_guard(scope):
+                exe.run(fluid.default_startup_program())
+                result_v = exe.run(
+                    feed={
+                        'a': a_v,
+                        'b': b_v,
+                        'found_inf': found_inf_v,
+                        'prev_loss_scaling': prev_loss_scaling_v,
+                        'num_good_steps': num_good_steps_v,
+                        'num_bad_steps': num_bad_steps_v,
+                    },
+                    fetch_list=[
+                        result,
+                        x,
+                        found_inf,
+                        prev_loss_scaling,
+                        num_good_steps,
+                        num_bad_steps,
+                    ],
+                )
+            assert np.array_equal(result_v[0], np.zeros_like(a_v))
+            assert np.array_equal(result_v[1], np.zeros_like(b_v))
+            assert np.array_equal(result_v[2], np.zeros_like(a_v))
+            assert np.array_equal(result_v[3], np.zeros_like(b_v))
+            assert np.array_equal(result_v[4], found_inf_v)
+            assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
+            assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
+            assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
     def test_loss_scaling_cpu(self):
-        main = fluid.Program()
-        startup = fluid.Program()
-        with fluid.unique_name.guard():
-            with fluid.program_guard(main, startup):
-                self.loss_scaling_check(use_cuda=False)
+        with paddle_static_guard():
+            main = fluid.Program()
+            startup = fluid.Program()
+            with fluid.unique_name.guard():
+                with fluid.program_guard(main, startup):
+                    self.loss_scaling_check(use_cuda=False)
     def test_loss_scaling_cpu_inf(self):
-        main = fluid.Program()
-        startup = fluid.Program()
-        with fluid.unique_name.guard():
-            with fluid.program_guard(main, startup):
-                self.loss_scaling_check_inf(use_cuda=False)
+        with paddle_static_guard():
+            main = fluid.Program()
+            startup = fluid.Program()
+            with fluid.unique_name.guard():
+                with fluid.program_guard(main, startup):
+                    self.loss_scaling_check_inf(use_cuda=False)
     def test_loss_scaling_gpu(self):
         if fluid.core.is_compiled_with_cuda():
-            main = fluid.Program()
-            startup = fluid.Program()
-            with fluid.unique_name.guard():
-                with fluid.program_guard(main, startup):
-                    self.loss_scaling_check(use_cuda=True)
+            with paddle_static_guard():
+                main = fluid.Program()
+                startup = fluid.Program()
+                with fluid.unique_name.guard():
+                    with fluid.program_guard(main, startup):
+                        self.loss_scaling_check(use_cuda=True)
     def test_loss_scaling_gpu_inf(self):
         if fluid.core.is_compiled_with_cuda():
-            main = fluid.Program()
-            startup = fluid.Program()
-            with fluid.unique_name.guard():
-                with fluid.program_guard(main, startup):
-                    self.loss_scaling_check_inf(use_cuda=True)
+            with paddle_static_guard():
+                main = fluid.Program()
+                startup = fluid.Program()
+                with fluid.unique_name.guard():
+                    with fluid.program_guard(main, startup):
+                        self.loss_scaling_check_inf(use_cuda=True)
 if __name__ == '__main__':
...
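The update_loss_scaling rewrite above is the one non-mechanical change in this batch: every static-graph test body moves under a with paddle_static_guard(): block, so the static setup no longer leaks into the now-default eager mode. A minimal sketch of the pattern, assuming paddle_static_guard() is a context manager that switches Paddle into static-graph mode and restores eager mode on exit, as the rewritten tests suggest (the helper function below is hypothetical):

    import paddle
    from eager_op_test import paddle_static_guard

    def build_static_scale_program():
        # hypothetical helper: all paddle.static.* calls belong inside
        # the guard so surrounding eager-mode tests are unaffected
        with paddle_static_guard():
            x = paddle.static.data(name="x", shape=[4, 8], dtype="float32")
            out = paddle.scale(x, scale=2.0)
        # eager mode is restored here; `out` is a static Variable handle
        return out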
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 class TestVarConv2DOp(OpTest):
...
@@ -11,7 +11,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -99,7 +99,7 @@ class TestViterbiOp(OpTest):
         self.outputs = {'Scores': scores, 'Path': path}
     def test_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 class TestViterbiAPI(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -228,7 +228,7 @@ class TestWarpRNNTOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
@@ -237,21 +237,19 @@ class TestWarpRNNTOp(OpTest):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_eager=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_eager=True,
             )
 class TestWarpRNNTFP64Op(TestWarpRNNTOp):
     def test_check_output(self):
         self.acts.astype(np.float64)
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
         self.acts.astype(np.float64)
@@ -261,14 +259,12 @@ class TestWarpRNNTFP64Op(TestWarpRNNTOp):
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_eager=True,
             )
         else:
             self.check_grad(
                 ["input"],
                 "loss",
                 numeric_grad_delta=0.009,
-                check_eager=True,
             )
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -115,7 +115,7 @@ class TestYoloBoxOp(OpTest):
         self.outputs = {'Boxes': boxes, 'Scores': scores}
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
     def initTestCase(self):
         self.anchors = [10, 13, 16, 30, 33, 23]
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from scipy.special import expit, logit
 import paddle
@@ -272,13 +272,11 @@ class TestYolov3LossOp(OpTest):
     def test_check_output(self):
         place = core.CPUPlace()
-        self.check_output_with_place(place, atol=2e-3, check_eager=True)
+        self.check_output_with_place(place, atol=2e-3)
     def test_check_grad_ignore_gtbox(self):
         place = core.CPUPlace()
-        self.check_grad_with_place(
-            place, ['X'], 'Loss', max_relative_error=0.2, check_eager=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Loss', max_relative_error=0.2)
     def initTestCase(self):
         self.anchors = [
...
@@ -56,7 +56,6 @@ def start_local_trainers(
     cluster,
     pod,
     training_script,
-    eager_mode,
     training_script_args,
     log_dir=None,
 ):
@@ -105,7 +104,7 @@ class TestMultipleGpus(unittest.TestCase):
-    def run_mnist_2gpu(self, target_file_name, eager_mode=True):
+    def run_mnist_2gpu(self, target_file_name):
         if fluid.core.get_cuda_device_count() == 0:
             return
@@ -118,7 +117,6 @@ class TestMultipleGpus(unittest.TestCase):
         procs = start_local_trainers(
             cluster,
             pod,
-            eager_mode=eager_mode,
             training_script=target_file_name,
             training_script_args=[],
         )
...
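With eager_mode removed from both start_local_trainers and run_mnist_2gpu, the multi-GPU helper now launches trainer processes in eager mode unconditionally, which is what lets every caller drop the eager_mode=True argument. A sketch of a caller after the change (the test class and script name are illustrative, not from this diff):

    from test_parallel_dygraph_dataparallel import TestMultipleGpus

    class TestMyCollectiveOp(TestMultipleGpus):
        def test_my_collective_op(self):
            # run_mnist_2gpu returns early when no CUDA device is present,
            # so this is a no-op on CPU-only CI machines
            self.run_mnist_2gpu('my_collective_test.py')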