Unverified · Commit 6261076c authored by wanghuancoder, committed by GitHub

Del old dygraph optest5 (#51686)

* delete old dygraph op test
Parent 3d78e759
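The change is mechanical across every file below: tests import OpTest (and helpers such as convert_float_to_uint16, skip_check_grad_ci, and paddle_static_guard) from eager_op_test instead of the old op_test module, and the now-redundant check_eager=True / eager_mode=True arguments are dropped because eager-mode checking is the default in the new base. A minimal sketch of a migrated op test follows; TestExampleOp and its shapes are illustrative only, not a file from this PR, and assume eager_op_test keeps the old OpTest interface:

import numpy as np

import paddle
# New-style import: the eager test base replaces op_test.
from eager_op_test import OpTest


class TestExampleOp(OpTest):  # hypothetical example for illustration only
    def setUp(self):
        self.op_type = "gather"
        self.python_api = paddle.gather  # eager API exercised by default
        x = np.random.random((10, 20)).astype("float64")
        index = np.array([1, 3, 5]).astype("int32")
        self.inputs = {'X': x, 'Index': index}
        self.outputs = {'Out': x[index]}

    def test_check_output(self):
        # Before this PR: self.check_output(check_eager=True)
        self.check_output()

    def test_check_grad(self):
        # Before this PR: self.check_grad(['X'], 'Out', check_eager=True)
        self.check_grad(['X'], 'Out')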
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveAllToAllSingle(TestMultipleGpus):
     def test_collective_alltoall_single(self):
-        self.run_mnist_2gpu('collective_alltoall_single.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_alltoall_single.py')
 
 
 if __name__ == "__main__":
...
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveBatchIsendIrecv(TestMultipleGpus):
     def test_collective_batch_isend_irecv(self):
-        self.run_mnist_2gpu('collective_batch_isend_irecv.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_batch_isend_irecv.py')
 
 
 if __name__ == "__main__":
...
@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestCollectiveReduceScatter(TestMultipleGpus):
     def test_collective_reduce_scatter(self):
-        self.run_mnist_2gpu('collective_reduce_scatter.py', eager_mode=True)
+        self.run_mnist_2gpu('collective_reduce_scatter.py')
 
 
 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
 import paddle.fluid as fluid
@@ -45,10 +45,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
 
     def config(self):
         """
@@ -194,10 +194,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5)
 
     def config(self):
         """
@@ -223,10 +223,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 
     def config(self):
         """
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 from paddle.fluid.framework import Program, program_guard
@@ -36,7 +36,7 @@ class TestGatherTreeOp(OpTest):
         self.outputs = {'Out': self.backtrace(ids, parents)}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     @staticmethod
     def backtrace(ids, parents):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
@@ -49,12 +49,10 @@ class TestGraphSendRecvMaxOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
-        )
+        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
 
 
 class TestGraphSendRecvMinOp(OpTest):
@@ -79,12 +77,10 @@ class TestGraphSendRecvMinOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
-        )
+        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
 
 
 class TestGraphSendRecvSumOp(OpTest):
@@ -107,10 +103,10 @@ class TestGraphSendRecvSumOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 
 
 class TestGraphSendRecvMeanOp(OpTest):
@@ -135,10 +131,10 @@ class TestGraphSendRecvMeanOp(OpTest):
         self.outputs = {'Out': out, 'Dst_count': dst_count}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 
 
 def compute_graph_send_recv_for_sum_mean(inputs, attributes):
...
@@ -16,7 +16,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
@@ -314,10 +314,10 @@ class TestGraphSendUERecvSumOp(OpTest):
         self.message_op = 'ADD'
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 
 
 class TestSumCase1(TestGraphSendUERecvSumOp):
@@ -420,10 +420,10 @@ class TestGraphSendUERecvMeanOp(OpTest):
         self.message_op = 'ADD'
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 
 
 class TestMeanCase1(TestGraphSendUERecvMeanOp):
@@ -526,14 +526,13 @@ class TestGraphSendUERecvMaxOp(OpTest):
         self.message_op = 'ADD'
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
             user_defined_grads=self.gradients,
-            check_eager=True,
         )
@@ -637,14 +636,13 @@ class TestGraphSendUERecvMinOp(OpTest):
         self.message_op = 'ADD'
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
             user_defined_grads=self.gradients,
-            check_eager=True,
         )
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
@@ -63,10 +63,10 @@ class TestGraphSendUVOp(OpTest):
         self.outputs = {'out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['x', 'y'], 'out', check_eager=True)
+        self.check_grad(['x', 'y'], 'out')
 
     def set_config(self):
         self.x = np.random.random((10, 20)).astype("float64")
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 
 import paddle
 import paddle.fluid.core as core
@@ -379,7 +379,7 @@ class TestGridSamplerOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad_normal(self):
         self.check_grad(
@@ -387,7 +387,6 @@ class TestGridSamplerOp(OpTest):
             'Output',
             max_relative_error=0.01,
             numeric_grad_delta=self.numeric_grad_delta,
-            check_eager=True,
         )
 
     def initTestCase(self):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class TestHingeLossOp(OpTest):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -154,7 +154,7 @@ class TestHistogramOp(OpTest):
         self.attrs = {"bins": self.bins, "min": self.min, "max": self.max}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestHistogramOp_ZeroDim(TestHistogramOp):
...
@@ -16,7 +16,7 @@ import math
 import unittest
 
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 
 import paddle
 import paddle.fluid as fluid
@@ -219,14 +219,13 @@ class TestHSigmoidOp(OpTest):
         self.user_grads = hsigmoid_grad(x, w, label, bias, num_classes)
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(
             ['X', 'W', 'Bias'],
             ['Out'],
             user_defined_grads=self.user_grads,
-            check_eager=True,
         )
@@ -280,7 +279,7 @@ class TestHSigmoidOpSparse(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
@@ -416,14 +415,13 @@ class TestHSigmoidOpWithCostumTree(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(
             ['Bias', 'X', 'W'],
             ['Out'],
             no_grad_set=set('Label'),
-            check_eager=True,
         )
@@ -482,12 +480,10 @@ class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest):
         self.outputs = {'PreOut': pre_output, 'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X', 'W'], ['Out'], no_grad_set=set('Label'), check_eager=True
-        )
+        self.check_grad(['X', 'W'], ['Out'], no_grad_set=set('Label'))
 
 
 class TestHSigmoidLossAPI(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -48,12 +48,12 @@ class TestIdentityLossOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()
 
     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
         paddle.disable_static()
 
     def initTestCase(self):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 from paddle.fluid import Program
@@ -93,10 +93,10 @@ class TestIndexAddOp(OpTest):
         self.add_value_shape = (3, 3)
 
     def test_check_output(self):
-        self.check_output(check_eager=True, atol=1e-2)
+        self.check_output(atol=1e-2)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'AddValue'], 'Out', check_eager=True)
+        self.check_grad(['X', 'AddValue'], 'Out')
 
 
 class TestIndexAddAPI(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -40,10 +40,10 @@ class TestIndexSampleOp(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')
 
     def config(self):
         """
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -40,10 +40,10 @@ class TestInverseOp(OpTest):
         self.outputs = {'Output': inverse}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_grad(self):
-        self.check_grad(['Input'], 'Output', check_eager=True)
+        self.check_grad(['Input'], 'Output')
 
 
 class TestInverseOpBatched(TestInverseOp):
@@ -60,9 +60,7 @@ class TestInverseOpLarge(TestInverseOp):
         self.python_api = paddle.tensor.math.inverse
 
     def test_grad(self):
-        self.check_grad(
-            ['Input'], 'Output', max_relative_error=1e-6, check_eager=True
-        )
+        self.check_grad(['Input'], 'Output', max_relative_error=1e-6)
 
 
 class TestInverseOpFP32(TestInverseOp):
@@ -72,9 +70,7 @@ class TestInverseOpFP32(TestInverseOp):
         self.python_api = paddle.tensor.math.inverse
 
     def test_grad(self):
-        self.check_grad(
-            ['Input'], 'Output', max_relative_error=1e-2, check_eager=True
-        )
+        self.check_grad(['Input'], 'Output', max_relative_error=1e-2)
 
 
 class TestInverseOpBatchedFP32(TestInverseOpFP32):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy.random as random
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class TestIOUSimilarityOp(OpTest):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
@@ -56,7 +56,7 @@ class TestIscloseOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestIscloseOpException(TestIscloseOp):
@@ -64,28 +64,28 @@ class TestIscloseOpException(TestIscloseOp):
         def test_rtol_num():
             self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
 
         self.assertRaises(ValueError, test_rtol_num)
 
         def test_rtol_type():
            self.inputs['Rtol'] = np.array([5]).astype("int32")
            self.inputs['Atol'] = np.array([1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
 
         self.assertRaises(ValueError, test_rtol_type)
 
         def test_atol_num():
             self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
-            self.check_output(check_eager=True)
+            self.check_output()
 
         self.assertRaises(ValueError, test_atol_num)
 
         def test_atol_type():
             self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
             self.inputs['Atol'] = np.array([8]).astype("int32")
-            self.check_output(check_eager=True)
+            self.check_output()
 
         self.assertRaises(ValueError, test_atol_type)
@@ -239,7 +239,7 @@ class TestIscloseOpFloat16(TestIscloseOp):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(place, check_eager=True)
+                self.check_output_with_place(place)
 
 
 class TestIscloseOpFloat32(TestIscloseOp):
@@ -260,7 +260,7 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.equal_nan = False
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestIscloseOpLargeDimInput(TestIscloseOp):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle.fluid.core as core
...
@@ -14,7 +14,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 
 import paddle
 from paddle.nn.functional import kl_div
@@ -55,12 +55,10 @@ class TestKLDivLossOp(OpTest):
         self.outputs = {'Loss': loss.astype('float64')}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True
-        )
+        self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
 
     def initTestCase(self):
         self.x_shape = (4, 5, 5)
@@ -114,12 +112,13 @@ class TestKLDivLossDygraph(unittest.TestCase):
         self.run_kl_loss('none')
 
     def test_kl_loss_static_api(self):
-        input = paddle.static.data(name='input', shape=[5, 20])
-        label = paddle.static.data(name='label', shape=[5, 20])
-
-        paddle.nn.functional.kl_div(input, label)
-        paddle.nn.functional.kl_div(input, label, 'sum')
-        paddle.nn.functional.kl_div(input, label, 'batchmean')
+        with paddle_static_guard():
+            input = paddle.static.data(name='input', shape=[5, 20])
+            label = paddle.static.data(name='label', shape=[5, 20])
+
+            paddle.nn.functional.kl_div(input, label)
+            paddle.nn.functional.kl_div(input, label, 'sum')
+            paddle.nn.functional.kl_div(input, label, 'batchmean')
 
 
 class TestKLDivLossTypePromotion(unittest.TestCase):
...
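The other recurring change, visible in the kl_div hunk above and again in the linspace, local_response_norm, and margin_cross_entropy files below, replaces manual global-mode toggling with the paddle_static_guard context manager from eager_op_test. A hedged before/after sketch follows, assuming the guard enables static mode on entry and restores dynamic mode on exit, which is what these hunks rely on; the exact old-style toggling varies by file:

import paddle
from eager_op_test import paddle_static_guard


def static_api_check_old():
    # Old style: flip the global mode by hand and remember to flip it
    # back; an exception in between leaves the process stuck in static mode.
    paddle.enable_static()
    x = paddle.static.data(name='input', shape=[5, 20])
    y = paddle.static.data(name='label', shape=[5, 20])
    paddle.nn.functional.kl_div(x, y)
    paddle.disable_static()


def static_api_check_new():
    # New style: static mode is scoped to the with-block, so the test
    # cannot leak static mode into later eager-mode tests.
    with paddle_static_guard():
        x = paddle.static.data(name='input', shape=[5, 20])
        y = paddle.static.data(name='label', shape=[5, 20])
        paddle.nn.functional.kl_div(x, y)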
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -37,16 +37,16 @@ class TestKronOp(OpTest):
         return "float64"
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 
     def test_check_grad_ignore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_eager=True)
+        self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
 
     def test_check_grad_ignore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_eager=True)
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 
 
 class TestKronOp2(TestKronOp):
@@ -168,7 +168,7 @@ class TestComplexKronOp(OpTest):
         return grad_y
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad_normal(self):
         self.check_grad(
@@ -176,7 +176,6 @@ class TestComplexKronOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
 
     def test_check_grad_ingore_x(self):
@@ -186,7 +185,6 @@ class TestComplexKronOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
 
     def test_check_grad_ingore_y(self):
@@ -196,7 +194,6 @@ class TestComplexKronOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -54,11 +54,11 @@ class TestKthvalueOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out', check_eager=True)
+        self.check_grad(set(['X']), 'Out')
 
 
 class TestKthvalueOpWithKeepdim(OpTest):
@@ -81,11 +81,11 @@ class TestKthvalueOpWithKeepdim(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         paddle.enable_static()
-        self.check_grad(set(['X']), 'Out', check_eager=True)
+        self.check_grad(set(['X']), 'Out')
 
 
 class TestKthvalueOpKernels(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class TestL1NormOp(OpTest):
...
@@ -17,7 +17,7 @@ from functools import reduce
 from operator import mul
 
 import numpy as np
-from op_test import _set_use_system_allocator
+from eager_op_test import _set_use_system_allocator
 
 import paddle
 import paddle.fluid as fluid
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
@@ -43,10 +43,10 @@ class TestLerp(OpTest):
         self.shape = [100]
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 
 
 class TestLerpWithDim2(TestLerp):
...
@@ -16,7 +16,7 @@ import math
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from scipy import special
 
 import paddle
@@ -42,10 +42,10 @@ class TestLgammaOp(OpTest):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_eager=True)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
 
 
 class TestLgammaOpFp32(TestLgammaOp):
@@ -53,9 +53,7 @@ class TestLgammaOpFp32(TestLgammaOp):
         self.dtype = np.float32
 
     def test_check_grad_normal(self):
-        self.check_grad(
-            ['X'], 'Out', numeric_grad_delta=0.005, check_eager=True
-        )
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
 
 
 class TestLgammaOpApi(unittest.TestCase):
...
@@ -16,7 +16,7 @@ import random
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class LinearChainCrfForward:
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 
 import paddle
 import paddle.fluid as fluid
@@ -37,7 +37,7 @@ class TestLinspaceOpCommonCase(OpTest):
         self.outputs = {'Out': np.arange(0, 11).astype(dtype)}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestLinspaceOpReverseCase(OpTest):
@@ -55,7 +55,7 @@ class TestLinspaceOpReverseCase(OpTest):
         self.outputs = {'Out': np.arange(10, -1, -1).astype(dtype)}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestLinspaceOpNumOneCase(OpTest):
@@ -73,56 +73,55 @@ class TestLinspaceOpNumOneCase(OpTest):
         self.outputs = {'Out': np.array(10, dtype=dtype)}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
 
 class TestLinspaceAPI(unittest.TestCase):
     def test_variable_input1(self):
-        start = paddle.full(shape=[1], fill_value=0, dtype='float32')
-        stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
-        num = paddle.full(shape=[1], fill_value=5, dtype='int32')
-        out = paddle.linspace(start, stop, num, dtype='float32')
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res = exe.run(fluid.default_main_program(), fetch_list=[out])
-        np_res = np.linspace(0, 10, 5, dtype='float32')
-        self.assertEqual((res == np_res).all(), True)
+        with paddle_static_guard():
+            start = paddle.full(shape=[1], fill_value=0, dtype='float32')
+            stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
+            num = paddle.full(shape=[1], fill_value=5, dtype='int32')
+            out = paddle.linspace(start, stop, num, dtype='float32')
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res = exe.run(fluid.default_main_program(), fetch_list=[out])
+            np_res = np.linspace(0, 10, 5, dtype='float32')
+            self.assertEqual((res == np_res).all(), True)
 
     def test_variable_input2(self):
-        paddle.disable_static()
         start = paddle.full(shape=[1], fill_value=0, dtype='float32')
         stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
         num = paddle.full(shape=[1], fill_value=5, dtype='int32')
         out = paddle.linspace(start, stop, num, dtype='float32')
         np_res = np.linspace(0, 10, 5, dtype='float32')
         self.assertEqual((out.numpy() == np_res).all(), True)
-        paddle.enable_static()
 
     def test_dtype(self):
-        out_1 = paddle.linspace(0, 10, 5, dtype='float32')
-        out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
-        out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
-            fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
-        )
-        assert np.array_equal(res_1, res_2)
+        with paddle_static_guard():
+            out_1 = paddle.linspace(0, 10, 5, dtype='float32')
+            out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
+            out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res_1, res_2, res_3 = exe.run(
+                fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
+            )
+            assert np.array_equal(res_1, res_2)
 
     def test_name(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            out = paddle.linspace(
-                0, 10, 5, dtype='float32', name='linspace_res'
-            )
-            assert 'linspace_res' in out.name
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                out = paddle.linspace(
                    0, 10, 5, dtype='float32', name='linspace_res'
+                )
+                assert 'linspace_res' in out.name
 
     def test_imperative(self):
-        paddle.disable_static()
         out1 = paddle.linspace(0, 10, 5, dtype='float32')
         np_out1 = np.linspace(0, 10, 5, dtype='float32')
         out2 = paddle.linspace(0, 10, 5, dtype='int32')
         np_out2 = np.linspace(0, 10, 5, dtype='int32')
         out3 = paddle.linspace(0, 10, 200, dtype='int32')
         np_out3 = np.linspace(0, 10, 200, dtype='int32')
-        paddle.enable_static()
         self.assertEqual((out1.numpy() == np_out1).all(), True)
         self.assertEqual((out2.numpy() == np_out2).all(), True)
         self.assertEqual((out3.numpy() == np_out3).all(), True)
@@ -130,52 +129,57 @@ class TestLinspaceAPI(unittest.TestCase):
 class TestLinspaceOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-
-            def test_dtype():
-                paddle.linspace(0, 10, 1, dtype="int8")
-
-            self.assertRaises(TypeError, test_dtype)
-
-            def test_dtype1():
-                paddle.linspace(0, 10, 1.33, dtype="int32")
-
-            self.assertRaises(TypeError, test_dtype1)
-
-            def test_start_type():
-                paddle.linspace([0], 10, 1, dtype="float32")
-
-            self.assertRaises(TypeError, test_start_type)
-
-            def test_end_type():
-                paddle.linspace(0, [10], 1, dtype="float32")
-
-            self.assertRaises(TypeError, test_end_type)
-
-            def test_step_dtype():
-                paddle.linspace(0, 10, [0], dtype="float32")
-
-            self.assertRaises(TypeError, test_step_dtype)
-
-            def test_start_dtype():
-                start = paddle.static.data(
-                    shape=[1], dtype="float64", name="start"
-                )
-                paddle.linspace(start, 10, 1, dtype="float32")
-
-            self.assertRaises(ValueError, test_start_dtype)
-
-            def test_end_dtype():
-                end = paddle.static.data(shape=[1], dtype="float64", name="end")
-                paddle.linspace(0, end, 1, dtype="float32")
-
-            self.assertRaises(ValueError, test_end_dtype)
-
-            def test_num_dtype():
-                num = paddle.static.data(shape=[1], dtype="int32", name="step")
-                paddle.linspace(0, 10, num, dtype="float32")
-
-            self.assertRaises(TypeError, test_step_dtype)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+
+                def test_dtype():
+                    paddle.linspace(0, 10, 1, dtype="int8")
+
+                self.assertRaises(TypeError, test_dtype)
+
+                def test_dtype1():
+                    paddle.linspace(0, 10, 1.33, dtype="int32")
+
+                self.assertRaises(TypeError, test_dtype1)
+
+                def test_start_type():
+                    paddle.linspace([0], 10, 1, dtype="float32")
+
+                self.assertRaises(TypeError, test_start_type)
+
+                def test_end_type():
+                    paddle.linspace(0, [10], 1, dtype="float32")
+
+                self.assertRaises(TypeError, test_end_type)
+
+                def test_step_dtype():
+                    paddle.linspace(0, 10, [0], dtype="float32")
+
+                self.assertRaises(TypeError, test_step_dtype)
+
+                def test_start_dtype():
+                    start = paddle.static.data(
+                        shape=[1], dtype="float64", name="start"
+                    )
+                    paddle.linspace(start, 10, 1, dtype="float32")
+
+                self.assertRaises(ValueError, test_start_dtype)
+
+                def test_end_dtype():
+                    end = paddle.static.data(
+                        shape=[1], dtype="float64", name="end"
+                    )
+                    paddle.linspace(0, end, 1, dtype="float32")
+
+                self.assertRaises(ValueError, test_end_dtype)
+
+                def test_num_dtype():
+                    num = paddle.static.data(
+                        shape=[1], dtype="int32", name="step"
+                    )
+                    paddle.linspace(0, 10, num, dtype="float32")
+
+                self.assertRaises(TypeError, test_step_dtype)
 
 
 if __name__ == "__main__":
...
@@ -16,7 +16,7 @@ import copy
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_multiclass_nms_op import iou
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class TestLodResetOpByAttr(OpTest):
...
@@ -63,12 +63,10 @@ class TestLogSoftmaxOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 
 
 class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
@@ -85,10 +83,10 @@ class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
         self.attrs = {'axis': -1}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], check_eager=True)
+        self.check_grad(['X'], ['Out'])
 
 
 class TestLogSoftmaxShape(TestLogSoftmaxOp):
@@ -122,7 +120,7 @@ class TestLogSoftmaxBF16Op(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=True)
+        self.check_output_with_place(place)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
@@ -131,7 +129,6 @@ class TestLogSoftmaxBF16Op(OpTest):
             ['X'],
             ['Out'],
             user_defined_grads=[self.x_grad],
-            check_eager=True,
         )
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 from paddle.fluid import core
@@ -59,12 +59,10 @@ class TestLogitOp(OpTest):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 
 
 class TestLogitOpFp32(TestLogitOp):
@@ -74,12 +72,10 @@ class TestLogitOpFp32(TestLogitOp):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 
 
 class TestLogitOpFp16(TestLogitOp):
@@ -89,12 +85,10 @@ class TestLogitOpFp16(TestLogitOp):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
-        )
+        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
 
 
 @unittest.skipIf(
@@ -122,7 +116,7 @@ class TestLogitOpBf16(OpTest):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, check_eager=True)
+            self.check_output_with_place(place)
 
     def test_check_grad(self):
         if core.is_compiled_with_cuda():
@@ -132,7 +126,6 @@ class TestLogitOpBf16(OpTest):
                 ['X'],
                 ['Out'],
                 user_defined_grads=[self.x_grad],
-                check_eager=True,
             )
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid.core as core
@@ -87,7 +87,7 @@ class TestLogsumexp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(
@@ -95,7 +95,6 @@ class TestLogsumexp(OpTest):
             ['Out'],
             user_defined_grads=self.user_defined_grads,
             user_defined_grad_outputs=self.user_defined_grad_outputs,
-            check_eager=True,
         )
 
     def calc_grad(self):
...
@@ -16,7 +16,7 @@ import struct
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 class TestLookupTableDequantOp(OpTest):
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 
 import paddle
 import paddle.fluid as fluid
@@ -116,94 +116,104 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
             self.places.append(fluid.CUDAPlace(0))
 
     def check_static_3d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            in_np1 = np.random.random([3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 1))
-
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 40, 40], dtype="float32"
-            )
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCL'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NLC'
-            )
-
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-
-            fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                in_np1 = np.random.random([3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 1))
+
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 40, 40], dtype="float32"
+                )
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCL'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NLC'
+                )
+
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+
+                fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
 
     def check_static_4d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 40, 40, 3], dtype="float32"
-            )
-
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCHW'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NHWC'
-            )
-
-            in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
-
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-
-            fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 40, 40, 3], dtype="float32"
+                )
+
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCHW'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NHWC'
+                )
+
+                in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
+
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+
+                fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
 
     def check_static_5d_input(self, place):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = paddle.static.data(
-                name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
-            )
-            input2 = paddle.static.data(
-                name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
-            )
-            res1 = paddle.nn.functional.local_response_norm(
-                x=input1, size=5, data_format='NCDHW'
-            )
-            res2 = paddle.nn.functional.local_response_norm(
-                x=input2, size=5, data_format='NDHWC'
-            )
-
-            in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
-            in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
-
-            exe = fluid.Executor(place)
-            fetches = exe.run(
-                fluid.default_main_program(),
-                feed={"input1": in_np1, "input2": in_np2},
-                fetch_list=[res1, res2],
-            )
-
-            fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
-            np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input1 = paddle.static.data(
+                    name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
+                )
+                input2 = paddle.static.data(
+                    name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
+                )
+                res1 = paddle.nn.functional.local_response_norm(
+                    x=input1, size=5, data_format='NCDHW'
+                )
+                res2 = paddle.nn.functional.local_response_norm(
+                    x=input2, size=5, data_format='NDHWC'
+                )
+
+                in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
+                in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
+
+                exe = fluid.Executor(place)
+                fetches = exe.run(
+                    fluid.default_main_program(),
+                    feed={"input1": in_np1, "input2": in_np2},
+                    fetch_list=[res1, res2],
+                )
+
+                fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
+                np.testing.assert_allclose(
+                    fetches[0], fetches1_tran, rtol=1e-05
+                )
 
     def test_static(self):
-        for place in self.places:
-            self.check_static_3d_input(place=place)
-            self.check_static_4d_input(place=place)
-            self.check_static_5d_input(place=place)
+        with paddle_static_guard():
+            for place in self.places:
+                self.check_static_3d_input(place=place)
+                self.check_static_4d_input(place=place)
+                self.check_static_5d_input(place=place)
 
     def check_dygraph_3d_input(self, place):
         with fluid.dygraph.guard(place):
@@ -268,46 +278,51 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
 class TestLocalResponseNormFAPIError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-
-            def test_Variable():
-                # the input of lrn must be Variable.
-                x1 = fluid.create_lod_tensor(
-                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
-                )
-                paddle.nn.functional.local_response_norm(x1, size=5)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_datatype():
-                x = paddle.static.data(
-                    name='x', shape=[3, 4, 5, 6], dtype="int32"
-                )
-                paddle.nn.functional.local_response_norm(x, size=5)
-
-            self.assertRaises(TypeError, test_datatype)
-
-            def test_dataformat():
-                x = paddle.static.data(
-                    name='x', shape=[3, 4, 5, 6], dtype="float32"
-                )
-                paddle.nn.functional.local_response_norm(
-                    x, size=5, data_format="NCTHW"
-                )
-
-            self.assertRaises(ValueError, test_dataformat)
-
-            def test_dim():
-                x = paddle.static.data(name='x', shape=[3, 4], dtype="float32")
-                paddle.nn.functional.local_response_norm(x, size=5)
-
-            self.assertRaises(ValueError, test_dim)
-
-            def test_shape():
-                x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
-                paddle.nn.functional.local_response_norm(x, size=5)
-
-            self.assertRaises(ValueError, test_shape)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+
+                def test_Variable():
+                    # the input of lrn must be Variable.
+                    x1 = fluid.create_lod_tensor(
+                        np.array([-1, 3, 5, 5]),
+                        [[1, 1, 1, 1]],
+                        fluid.CPUPlace(),
+                    )
+                    paddle.nn.functional.local_response_norm(x1, size=5)
+
+                self.assertRaises(TypeError, test_Variable)
+
+                def test_datatype():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4, 5, 6], dtype="int32"
+                    )
+                    paddle.nn.functional.local_response_norm(x, size=5)
+
+                self.assertRaises(TypeError, test_datatype)
+
+                def test_dataformat():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4, 5, 6], dtype="float32"
+                    )
+                    paddle.nn.functional.local_response_norm(
+                        x, size=5, data_format="NCTHW"
+                    )
+
+                self.assertRaises(ValueError, test_dataformat)
+
+                def test_dim():
+                    x = paddle.static.data(
+                        name='x', shape=[3, 4], dtype="float32"
+                    )
+                    paddle.nn.functional.local_response_norm(x, size=5)
+
+                self.assertRaises(ValueError, test_dim)
+
+                def test_shape():
+                    x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
+                    paddle.nn.functional.local_response_norm(x, size=5)
+
+                self.assertRaises(ValueError, test_shape)
 
 
 class TestLocalResponseNormCAPI(unittest.TestCase):
@@ -335,28 +350,29 @@ class TestLocalResponseNormCAPI(unittest.TestCase):
     def test_static_fp16_gpu(self):
         if paddle.fluid.core.is_compiled_with_cuda():
             place = paddle.CUDAPlace(0)
-            with paddle.static.program_guard(
-                paddle.static.Program(), paddle.static.Program()
-            ):
-                input = np.random.random([3, 3, 112, 112]).astype("float16")
-
-                x = paddle.static.data(
-                    name="x", shape=[3, 3, 112, 112], dtype="float16"
-                )
-
-                m = paddle.nn.LocalResponseNorm(size=5)
-                y = m(x)
-
-                exe = paddle.static.Executor(place)
-                res = exe.run(
-                    paddle.static.default_main_program(),
-                    feed={
-                        "x": input,
-                    },
-                    fetch_list=[y],
-                )
-
-                assert np.array_equal(res[0].shape, input.shape)
+            with paddle_static_guard():
+                with paddle.static.program_guard(
+                    paddle.static.Program(), paddle.static.Program()
+                ):
+                    input = np.random.random([3, 3, 112, 112]).astype("float16")
+
+                    x = paddle.static.data(
+                        name="x", shape=[3, 3, 112, 112], dtype="float16"
+                    )
+
+                    m = paddle.nn.LocalResponseNorm(size=5)
+                    y = m(x)
+
+                    exe = paddle.static.Executor(place)
+                    res = exe.run(
+                        paddle.static.default_main_program(),
+                        feed={
+                            "x": input,
+                        },
+                        fetch_list=[y],
+                    )
+
+                    assert np.array_equal(res[0].shape, input.shape)
 
 
 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 SIGMOID_THRESHOLD_MIN = -40.0
 SIGMOID_THRESHOLD_MAX = 13.0
...
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 
 
 def sigmoid_np(x):
...
@@ -19,7 +19,7 @@ import unittest
 import numpy as np
 import scipy
 import scipy.linalg
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -156,10 +156,10 @@ class TestLUOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], check_eager=True)
+        self.check_grad(['X'], ['Out'])
 
 
 # m = n 2D
...
@@ -19,7 +19,7 @@ import unittest
 import numpy as np
 import scipy
 import scipy.linalg
-from op_test import OpTest
+from eager_op_test import OpTest
 
 import paddle
 import paddle.fluid as fluid
@@ -168,10 +168,10 @@ class TestLU_UnpackOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['L', 'U'], check_eager=True)
+        self.check_grad(['X'], ['L', 'U'])
 
 
 # m = n
...
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle.fluid import Program, core, program_guard from paddle.fluid import Program, core, program_guard
...@@ -148,14 +148,10 @@ class TestMarginCrossEntropyOp(OpTest): ...@@ -148,14 +148,10 @@ class TestMarginCrossEntropyOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(core.CUDAPlace(0), atol=1e-5)
core.CUDAPlace(0), atol=1e-5, check_eager=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place( self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], "Loss")
core.CUDAPlace(0), ["Logits"], "Loss", check_eager=True
)
@unittest.skipIf( @unittest.skipIf(
...@@ -172,7 +168,6 @@ class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp): ...@@ -172,7 +168,6 @@ class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp):
"Loss", "Loss",
numeric_grad_delta=5e-2, numeric_grad_delta=5e-2,
max_relative_error=5e-2, max_relative_error=5e-2,
check_eager=True,
) )
...@@ -184,9 +179,7 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp): ...@@ -184,9 +179,7 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
self.dtype = np.float16 self.dtype = np.float16
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(core.CUDAPlace(0), atol=5e-2)
core.CUDAPlace(0), atol=5e-2, check_eager=True
)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place( self.check_grad_with_place(
...@@ -195,7 +188,6 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp): ...@@ -195,7 +188,6 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
"Loss", "Loss",
numeric_grad_delta=6e-1, numeric_grad_delta=6e-1,
max_relative_error=6e-1, max_relative_error=6e-1,
check_eager=True,
) )
...@@ -224,17 +216,13 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp): ...@@ -224,17 +216,13 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp):
class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp): class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp):
def test_check_output(self): def test_check_output(self):
try: try:
self.check_output_with_place( self.check_output_with_place(core.CPUPlace(), atol=1e-5)
core.CPUPlace(), atol=1e-5, check_eager=True
)
except RuntimeError: except RuntimeError:
pass pass
def test_check_grad(self): def test_check_grad(self):
try: try:
self.check_grad_with_place( self.check_grad_with_place(core.CPUPlace(), ["Logits"], "Loss")
core.CPUPlace(), ["Logits"], "Loss", check_eager=True
)
except RuntimeError: except RuntimeError:
pass pass
...@@ -279,63 +267,64 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): ...@@ -279,63 +267,64 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase):
self.check_static_result(place=place) self.check_static_result(place=place)
def check_static_result(self, place): def check_static_result(self, place):
with program_guard(Program(), Program()): with paddle_static_guard():
datas = np.random.uniform( with program_guard(Program(), Program()):
-0.99, 0.99, [self.batch_dim, self.feat_dim] datas = np.random.uniform(
).astype(self.dtype) -0.99, 0.99, [self.batch_dim, self.feat_dim]
datas = datas / np.sqrt( ).astype(self.dtype)
np.sum(np.square(datas), axis=1, keepdims=True) datas = datas / np.sqrt(
) np.sum(np.square(datas), axis=1, keepdims=True)
weights = np.random.uniform( )
-0.99, 0.99, [self.feat_dim, self.num_class] weights = np.random.uniform(
).astype(self.dtype) -0.99, 0.99, [self.feat_dim, self.num_class]
weights = weights / np.sqrt( ).astype(self.dtype)
np.sum(np.square(weights), axis=0, keepdims=True) weights = weights / np.sqrt(
) np.sum(np.square(weights), axis=0, keepdims=True)
)
logits_np = np.matmul(datas, weights)
labels_np = np.random.randint( logits_np = np.matmul(datas, weights)
0, self.num_class, (self.batch_dim,), dtype="int64" labels_np = np.random.randint(
) 0, self.num_class, (self.batch_dim,), dtype="int64"
)
loss_np, softmax_np = margin_cross_entropy(
logits_np, loss_np, softmax_np = margin_cross_entropy(
labels_np, logits_np,
self.axis, labels_np,
self.margin1, self.axis,
self.margin2, self.margin1,
self.margin3, self.margin2,
self.scale, self.margin3,
self.reduction, self.scale,
) self.reduction,
)
logits = paddle.static.data(
name='logits', logits = paddle.static.data(
shape=[self.batch_dim, self.num_class], name='logits',
dtype=self.dtype, shape=[self.batch_dim, self.num_class],
) dtype=self.dtype,
label = paddle.static.data( )
name='label', shape=[self.batch_dim], dtype="int64" label = paddle.static.data(
) name='label', shape=[self.batch_dim], dtype="int64"
loss, softmax = paddle.nn.functional.margin_cross_entropy( )
logits, loss, softmax = paddle.nn.functional.margin_cross_entropy(
label, logits,
margin1=self.margin1, label,
margin2=self.margin2, margin1=self.margin1,
margin3=self.margin3, margin2=self.margin2,
scale=self.scale, margin3=self.margin3,
return_softmax=True, scale=self.scale,
reduction=self.reduction, return_softmax=True,
) reduction=self.reduction,
)
exe = paddle.fluid.Executor(place)
[loss_res, softmax_res] = exe.run( exe = paddle.fluid.Executor(place)
paddle.fluid.default_main_program(), [loss_res, softmax_res] = exe.run(
feed={'logits': logits_np, 'label': labels_np}, paddle.fluid.default_main_program(),
fetch_list=[loss, softmax], feed={'logits': logits_np, 'label': labels_np},
) fetch_list=[loss, softmax],
np.testing.assert_allclose(loss_res, loss_np) )
np.testing.assert_allclose(softmax_res, softmax_np) np.testing.assert_allclose(loss_res, loss_np)
np.testing.assert_allclose(softmax_res, softmax_np)
def test_dynamic(self): def test_dynamic(self):
for place in self.places: for place in self.places:
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
from paddle import fluid from paddle import fluid
...@@ -83,27 +83,32 @@ class TestMarginRankLossLayer(unittest.TestCase): ...@@ -83,27 +83,32 @@ class TestMarginRankLossLayer(unittest.TestCase):
self.check_identity(place) self.check_identity(place)
def check_identity(self, place): def check_identity(self, place):
main = fluid.Program() with paddle_static_guard():
start = fluid.Program() main = fluid.Program()
with fluid.unique_name.guard(): start = fluid.Program()
with fluid.program_guard(main, start): with fluid.unique_name.guard():
label = paddle.static.data( with fluid.program_guard(main, start):
"label", (self.batch_size, 1), "float32" label = paddle.static.data(
) "label", (self.batch_size, 1), "float32"
x1 = paddle.static.data("x1", (self.batch_size, 1), "float32") )
x2 = paddle.static.data("x2", (self.batch_size, 1), "float32") x1 = paddle.static.data(
out = paddle.nn.functional.margin_ranking_loss( "x1", (self.batch_size, 1), "float32"
x1, x2, label, self.margin, 'none' )
) x2 = paddle.static.data(
"x2", (self.batch_size, 1), "float32"
exe = fluid.Executor(place) )
exe.run(start) out = paddle.nn.functional.margin_ranking_loss(
(out_np,) = exe.run( x1, x2, label, self.margin, 'none'
main, )
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out], exe = fluid.Executor(place)
) exe.run(start)
np.testing.assert_allclose(out_np, self.loss) (out_np,) = exe.run(
main,
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out],
)
np.testing.assert_allclose(out_np, self.loss)
if __name__ == '__main__': if __name__ == '__main__':
......
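Both loss tests above wrap their static programs the same way. A condensed, self-contained version of the pattern, using enable_static/disable_static directly in place of the guard and relu as a stand-in op:

    import numpy as np

    import paddle

    paddle.enable_static()  # what the guard does on entry
    with paddle.static.program_guard(
        paddle.static.Program(), paddle.static.Program()
    ):
        x = paddle.static.data(name='x', shape=[4, 3], dtype='float32')
        y = paddle.nn.functional.relu(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        (out,) = exe.run(
            feed={'x': np.full([4, 3], -1.0, 'float32')}, fetch_list=[y]
        )
    np.testing.assert_allclose(out, np.zeros([4, 3], 'float32'))
    paddle.disable_static()  # what the guard does on exit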
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -40,10 +40,10 @@ class TestMaskedSelectOp(OpTest): ...@@ -40,10 +40,10 @@ class TestMaskedSelectOp(OpTest):
self.outputs = {'Y': out} self.outputs = {'Y': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Y', check_eager=True) self.check_grad(['X'], 'Y')
def init(self): def init(self):
self.shape = (50, 3) self.shape = (50, 3)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestMatchMatrixTensorOp(OpTest): class TestMatchMatrixTensorOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest, paddle_static_guard
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -163,28 +163,33 @@ for dim in [4]: ...@@ -163,28 +163,33 @@ for dim in [4]:
class API_TestMm(unittest.TestCase): class API_TestMm(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program()): with paddle_static_guard():
x = paddle.static.data(name="x", shape=[2], dtype="float64") with fluid.program_guard(fluid.Program()):
y = paddle.static.data(name='y', shape=[2], dtype='float64') x = paddle.static.data(name="x", shape=[2], dtype="float64")
res = paddle.static.data(name="output", shape=[1], dtype="float64") y = paddle.static.data(name='y', shape=[2], dtype='float64')
result = paddle.mm(x, y) res = paddle.static.data(
exe = fluid.Executor(fluid.CPUPlace()) name="output", shape=[1], dtype="float64"
data1 = np.random.rand(2) )
data2 = np.random.rand(2) result = paddle.mm(x, y)
np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result]) exe = fluid.Executor(fluid.CPUPlace())
expected_result = np.matmul( data1 = np.random.rand(2)
data1.reshape(1, 2), data2.reshape(2, 1) data2 = np.random.rand(2)
) np_res = exe.run(
feed={'x': data1, 'y': data2}, fetch_list=[result]
)
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1)
)
np.testing.assert_allclose( np.testing.assert_allclose(
np_res, np_res,
expected_result, expected_result,
rtol=1e-05, rtol=1e-05,
atol=1e-05, atol=1e-05,
err_msg='two values are {}\n{}, check diff!'.format( err_msg='two values are {}\n{}, check diff!'.format(
np_res, expected_result np_res, expected_result
), ),
) )
def test_dygraph_without_out(self): def test_dygraph_without_out(self):
device = fluid.CPUPlace() device = fluid.CPUPlace()
...@@ -213,41 +218,43 @@ class Test_API_Matmul(unittest.TestCase): ...@@ -213,41 +218,43 @@ class Test_API_Matmul(unittest.TestCase):
class API_TestMmError(unittest.TestCase): class API_TestMmError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_error1(): with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error1():
name="data1", shape=[10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[10, 2], dtype="float32"
name="data2", shape=[3, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[3, 10], dtype="float32"
)
self.assertRaises(ValueError, test_error1) paddle.mm(data1, data2)
def test_error2(): self.assertRaises(ValueError, test_error1)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error2():
name="data1", shape=[-1, 10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[-1, 10, 2], dtype="float32"
name="data2", shape=[-1, 2, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[-1, 2, 10], dtype="float32"
)
test_error2() paddle.mm(data1, data2)
def test_error3(): test_error2()
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data( def test_error3():
name="data1", shape=[10, 10, 2], dtype="float32" with fluid.program_guard(fluid.Program(), fluid.Program()):
) data1 = paddle.static.data(
data2 = paddle.static.data( name="data1", shape=[10, 10, 2], dtype="float32"
name="data2", shape=[3, 2, 10], dtype="float32" )
) data2 = paddle.static.data(
paddle.mm(data1, data2) name="data2", shape=[3, 2, 10], dtype="float32"
)
self.assertRaises(ValueError, test_error3) paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -16,7 +16,7 @@ import copy ...@@ -16,7 +16,7 @@ import copy
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
...@@ -296,7 +296,7 @@ class TestMatrixNMSOp(OpTest): ...@@ -296,7 +296,7 @@ class TestMatrixNMSOp(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMatrixNMSOpNoOutput(TestMatrixNMSOp): class TestMatrixNMSOpNoOutput(TestMatrixNMSOp):
......
...@@ -45,7 +45,7 @@ class TestMatrixRankOP(OpTest): ...@@ -45,7 +45,7 @@ class TestMatrixRankOP(OpTest):
self.outputs = {'Out': self.out} self.outputs = {'Out': self.out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_data(self): def init_data(self):
self.x = np.eye(3, dtype=np.float32) self.x = np.eye(3, dtype=np.float32)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import check_out_dtype from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -57,10 +57,10 @@ class TestMaxOutOp(OpTest): ...@@ -57,10 +57,10 @@ class TestMaxOutOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMaxOutOpAxis0(TestMaxOutOp): class TestMaxOutOpAxis0(TestMaxOutOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def compute_mean_iou( def compute_mean_iou(
...@@ -137,7 +137,7 @@ class TestCase1(TestMeanIOUOp): ...@@ -137,7 +137,7 @@ class TestCase1(TestMeanIOUOp):
# NOTE(dev): Skip check_dygraph because Python API doesn't expose # NOTE(dev): Skip check_dygraph because Python API doesn't expose
# in_wrong_num/in_correct_num/in_mean_iou_num arguments # in_wrong_num/in_correct_num/in_mean_iou_num arguments
def test_check_output(self): def test_check_output(self):
self.check_output(check_dygraph=False, check_eager=False) self.check_output(check_dygraph=False)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import gradient_checker import gradient_checker
import numpy as np import numpy as np
from decorator_helper import prog_scope from decorator_helper import prog_scope
from op_test import OpTest, OpTestTool, convert_float_to_uint16 from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
...@@ -53,10 +53,10 @@ class TestMeanOp(OpTest): ...@@ -53,10 +53,10 @@ class TestMeanOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_checkout_grad(self): def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMeanOp_ZeroDim(OpTest): class TestMeanOp_ZeroDim(OpTest):
...@@ -68,10 +68,10 @@ class TestMeanOp_ZeroDim(OpTest): ...@@ -68,10 +68,10 @@ class TestMeanOp_ZeroDim(OpTest):
self.outputs = {'Out': np.mean(self.inputs["X"])} self.outputs = {'Out': np.mean(self.inputs["X"])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_checkout_grad(self): def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestMeanOpError(unittest.TestCase): class TestMeanOpError(unittest.TestCase):
...@@ -102,7 +102,7 @@ class TestFP16MeanOp(TestMeanOp): ...@@ -102,7 +102,7 @@ class TestFP16MeanOp(TestMeanOp):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_checkout_grad(self): def test_checkout_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -126,11 +126,11 @@ class TestBF16MeanOp(TestMeanOp): ...@@ -126,11 +126,11 @@ class TestBF16MeanOp(TestMeanOp):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output_with_place(core.CPUPlace(), check_eager=True) self.check_output_with_place(core.CPUPlace())
def test_checkout_grad(self): def test_checkout_grad(self):
place = core.CPUPlace() place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False): def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
...@@ -181,14 +181,14 @@ class TestReduceMeanOp(OpTest): ...@@ -181,14 +181,14 @@ class TestReduceMeanOp(OpTest):
def test_check_output(self): def test_check_output(self):
if self.dtype != 'float16': if self.dtype != 'float16':
self.check_output(check_eager=True, check_prim=True) self.check_output(check_prim=True)
else: else:
place = paddle.CUDAPlace(0) place = paddle.CUDAPlace(0)
self.check_output_with_place(place=place, check_prim=True) self.check_output_with_place(place=place, check_prim=True)
def test_check_grad(self): def test_check_grad(self):
if self.dtype != 'float16': if self.dtype != 'float16':
self.check_grad(['X'], ['Out'], check_eager=True, check_prim=True) self.check_grad(['X'], ['Out'], check_prim=True)
else: else:
place = paddle.CUDAPlace(0) place = paddle.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import check_out_dtype from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase from test_sum_op import TestReduceOPTensorAxisBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -74,11 +74,11 @@ class TestModeOp(OpTest): ...@@ -74,11 +74,11 @@ class TestModeOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True) self.check_grad(set(['X']), 'Out')
class TestModeOpLastdim(OpTest): class TestModeOpLastdim(OpTest):
...@@ -99,11 +99,11 @@ class TestModeOpLastdim(OpTest): ...@@ -99,11 +99,11 @@ class TestModeOpLastdim(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True) self.check_grad(set(['X']), 'Out')
class TestModeOpKernels(unittest.TestCase): class TestModeOpKernels(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def modified_huber_loss_forward(val): def modified_huber_loss_forward(val):
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
import unittest import unittest
import numpy as np import numpy as np
from eager_op_test import OpTest
from numpy.linalg import multi_dot from numpy.linalg import multi_dot
from op_test import OpTest
import paddle import paddle
...@@ -42,11 +42,11 @@ class TestMultiDotOp(OpTest): ...@@ -42,11 +42,11 @@ class TestMultiDotOp(OpTest):
self.outputs = {'Out': multi_dot([self.A, self.B])} self.outputs = {'Out': multi_dot([self.A, self.B])}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
# (A*B)*C # (A*B)*C
...@@ -59,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp): ...@@ -59,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
# A*(B*C) # A*(B*C)
...@@ -74,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp): ...@@ -74,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
class TestMultiDotOp4Mat(TestMultiDotOp): class TestMultiDotOp4Mat(TestMultiDotOp):
...@@ -96,10 +96,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp): ...@@ -96,10 +96,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
self.check_grad(['x3'], 'Out', check_eager=True) self.check_grad(['x3'], 'Out')
class TestMultiDotOpFirst1D(TestMultiDotOp): class TestMultiDotOpFirst1D(TestMultiDotOp):
...@@ -153,9 +153,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat): ...@@ -153,9 +153,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True) self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out', check_eager=True) self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out', check_eager=True) self.check_grad(['x2'], 'Out')
class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat): class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
......
...@@ -16,7 +16,7 @@ import copy ...@@ -16,7 +16,7 @@ import copy
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
...@@ -797,7 +797,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op): ...@@ -797,7 +797,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op): class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op):
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_attribute_var import UnittestBase from test_attribute_var import UnittestBase
import paddle import paddle
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle.static import Program, program_guard from paddle.static import Program, program_guard
...@@ -30,10 +30,10 @@ class TestMVOp(OpTest): ...@@ -30,10 +30,10 @@ class TestMVOp(OpTest):
self.outputs = {'Out': np.dot(self.x, self.vec)} self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out', check_eager=True) self.check_grad(['X', 'Vec'], 'Out')
def init_config(self): def init_config(self):
self.x = np.random.random((2, 100)).astype("float64") self.x = np.random.random((2, 100)).astype("float64")
......
...@@ -20,7 +20,7 @@ import numpy as np ...@@ -20,7 +20,7 @@ import numpy as np
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
# from op_test import OpTest # from eager_op_test import OpTest
def np_nan_to_num( def np_nan_to_num(
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -314,10 +314,10 @@ class TestNearestInterpOp(OpTest): ...@@ -314,10 +314,10 @@ class TestNearestInterpOp(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True) self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
...@@ -481,9 +481,7 @@ class TestNearestInterpOpUint8(OpTest): ...@@ -481,9 +481,7 @@ class TestNearestInterpOpUint8(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output_with_place( self.check_output_with_place(place=core.CPUPlace(), atol=1)
place=core.CPUPlace(), atol=1, check_eager=True
)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
...@@ -631,10 +629,10 @@ class TestNearestInterpOp_attr_tensor(OpTest): ...@@ -631,10 +629,10 @@ class TestNearestInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np} self.outputs = {'Out': output_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True) self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self): def init_test_case(self):
self.interp_method = 'nearest' self.interp_method = 'nearest'
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -958,19 +958,19 @@ class TestNLLLossOp1DWithReduce(OpTest): ...@@ -958,19 +958,19 @@ class TestNLLLossOp1DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100} self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [10, 10] self.input_shape = [10, 10]
...@@ -1009,19 +1009,19 @@ class TestNLLLossOp1DNoReduce(OpTest): ...@@ -1009,19 +1009,19 @@ class TestNLLLossOp1DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100} self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [10, 10] self.input_shape = [10, 10]
...@@ -1059,19 +1059,19 @@ class TestNLLLossOp2DWithReduce(OpTest): ...@@ -1059,19 +1059,19 @@ class TestNLLLossOp2DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100} self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [2, 3, 5, 5] self.input_shape = [2, 3, 5, 5]
...@@ -1110,19 +1110,19 @@ class TestNLLLossOp2DNoReduce(OpTest): ...@@ -1110,19 +1110,19 @@ class TestNLLLossOp2DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100} self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_output_with_weight(self): def test_check_output_with_weight(self):
self.with_weight = True self.with_weight = True
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.with_weight = True self.with_weight = True
place = fluid.CPUPlace() place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0) place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True) self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.input_shape = [5, 3, 5, 5] self.input_shape = [5, 3, 5, 5]
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -90,7 +90,7 @@ class TestNMSOp(OpTest): ...@@ -90,7 +90,7 @@ class TestNMSOp(OpTest):
pass pass
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -103,10 +103,10 @@ class TestFrobeniusNormOp(OpTest): ...@@ -103,10 +103,10 @@ class TestFrobeniusNormOp(OpTest):
self.outputs = {'Out': norm} self.outputs = {'Out': norm}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -127,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp): ...@@ -127,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
self.dtype = "float32" self.dtype = "float32"
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestPnormOp(OpTest): class TestPnormOp(OpTest):
...@@ -150,10 +150,10 @@ class TestPnormOp(OpTest): ...@@ -150,10 +150,10 @@ class TestPnormOp(OpTest):
self.gradient = self.calc_gradient() self.gradient = self.calc_gradient()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -349,7 +349,7 @@ class TestPnormBF16Op(OpTest): ...@@ -349,7 +349,7 @@ class TestPnormBF16Op(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=True) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -358,7 +358,6 @@ class TestPnormBF16Op(OpTest): ...@@ -358,7 +358,6 @@ class TestPnormBF16Op(OpTest):
['X'], ['X'],
'Out', 'Out',
user_defined_grads=self.gradient, user_defined_grads=self.gradient,
check_eager=True,
) )
def init_test_case(self): def init_test_case(self):
......
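Note what the low-precision variants above keep: the CUDA place, atol, and user-defined gradients all survive; only check_eager goes away. A hypothetical sketch of the bf16 pattern (scale as a stand-in op; the real tolerances are op-specific):

    import numpy as np

    import paddle
    import paddle.fluid.core as core
    from eager_op_test import OpTest, convert_float_to_uint16

    class TestScaleBF16(OpTest):
        def setUp(self):
            self.op_type = "scale"
            self.python_api = paddle.scale
            x = np.random.rand(4, 5).astype(np.float32)
            self.attrs = {'scale': 2.0}
            self.inputs = {'X': convert_float_to_uint16(x)}  # bf16 stored as uint16
            self.outputs = {'Out': convert_float_to_uint16(x * 2.0)}

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                self.check_output_with_place(core.CUDAPlace(0), atol=1e-2)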
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -92,12 +92,12 @@ class TestOverlapAddOp(OpTest): ...@@ -92,12 +92,12 @@ class TestOverlapAddOp(OpTest):
def test_check_output(self): def test_check_output(self):
paddle.enable_static() paddle.enable_static()
self.check_output(check_eager=True) self.check_output()
paddle.disable_static() paddle.disable_static()
def test_check_grad_normal(self): def test_check_grad_normal(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
paddle.disable_static() paddle.disable_static()
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -77,10 +77,10 @@ class TestPad3dOp(OpTest): ...@@ -77,10 +77,10 @@ class TestPad3dOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
def initTestCase(self): def initTestCase(self):
self.shape = (2, 3, 4, 5, 6) self.shape = (2, 3, 4, 5, 6)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestPadConstantLikeOp(OpTest): class TestPadConstantLikeOp(OpTest):
......
...@@ -102,7 +102,6 @@ def start_local_trainers( ...@@ -102,7 +102,6 @@ def start_local_trainers(
pod, pod,
training_script, training_script,
training_script_args, training_script_args,
eager_mode=True,
allocator_strategy="auto_growth", allocator_strategy="auto_growth",
log_dir=None, log_dir=None,
): ):
...@@ -158,7 +157,6 @@ class TestMultipleGpus(unittest.TestCase): ...@@ -158,7 +157,6 @@ class TestMultipleGpus(unittest.TestCase):
def run_mnist_2gpu( def run_mnist_2gpu(
self, self,
target_file_name, target_file_name,
eager_mode=True,
allocator_strategy="auto_growth", allocator_strategy="auto_growth",
): ):
if ( if (
...@@ -176,7 +174,6 @@ class TestMultipleGpus(unittest.TestCase): ...@@ -176,7 +174,6 @@ class TestMultipleGpus(unittest.TestCase):
procs = start_local_trainers( procs = start_local_trainers(
cluster, cluster,
pod, pod,
eager_mode=eager_mode,
allocator_strategy=allocator_strategy, allocator_strategy=allocator_strategy,
training_script=target_file_name, training_script=target_file_name,
training_script_args=[], training_script_args=[],
......
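start_local_trainers and run_mnist_2gpu lose their eager_mode parameter here, which is why the collective tests in this series no longer pass it. A hypothetical sketch of the plumbing being deleted, assuming the flag only fed an environment toggle on each spawned trainer:

    import os

    def trainer_env(selected_gpus, eager_mode=True):  # old-style signature
        env = dict(os.environ)
        env["FLAGS_selected_gpus"] = selected_gpus
        # The removed knob: every trainer now runs in eager mode, so the
        # toggle (and the parameter feeding it) is dead weight.
        env["FLAGS_enable_eager_mode"] = "1" if eager_mode else "0"
        return env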
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def np_partial_concat(inputs, start, length): def np_partial_concat(inputs, start, length):
......
...@@ -16,7 +16,7 @@ import random ...@@ -16,7 +16,7 @@ import random
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestPartialSumOp(OpTest): class TestPartialSumOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
def PolygonBoxRestore(input): def PolygonBoxRestore(input):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -76,7 +76,7 @@ class TestPriorBoxOp(OpTest): ...@@ -76,7 +76,7 @@ class TestPriorBoxOp(OpTest):
self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def setUp(self): def setUp(self):
self.op_type = "prior_box" self.op_type = "prior_box"
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestProximalAdagradOp(OpTest): class TestProximalAdagradOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestProximalGDOp(OpTest): class TestProximalGDOp(OpTest):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -174,10 +174,10 @@ class TestPSROIPoolOp(OpTest): ...@@ -174,10 +174,10 @@ class TestPSROIPoolOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
......
...@@ -16,7 +16,7 @@ import itertools ...@@ -16,7 +16,7 @@ import itertools
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -71,13 +71,12 @@ class TestQrOp(OpTest): ...@@ -71,13 +71,12 @@ class TestQrOp(OpTest):
return a, q, r return a, q, r
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad( self.check_grad(
['X'], ['X'],
['Q', 'R'], ['Q', 'R'],
check_eager=True,
numeric_grad_delta=1e-5, numeric_grad_delta=1e-5,
max_relative_error=1e-6, max_relative_error=1e-6,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestRandomCropOp(OpTest): class TestRandomCropOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
from functools import partial from functools import partial
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -47,7 +47,7 @@ class TestRangeOp(OpTest): ...@@ -47,7 +47,7 @@ class TestRangeOp(OpTest):
self.case = (0, 1, 0.2) self.case = (0, 1, 0.2)
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class TestFloatRangeOpCase0(TestRangeOp): class TestFloatRangeOpCase0(TestRangeOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -58,7 +58,7 @@ class TestRealOp(OpTest): ...@@ -58,7 +58,7 @@ class TestRealOp(OpTest):
) )
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad( self.check_grad(
...@@ -66,7 +66,6 @@ class TestRealOp(OpTest): ...@@ -66,7 +66,6 @@ class TestRealOp(OpTest):
'Out', 'Out',
user_defined_grads=[self.grad_x], user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out], user_defined_grad_outputs=[self.grad_out],
check_eager=True,
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -58,10 +58,10 @@ class TestRepeatInterleaveOp(OpTest): ...@@ -58,10 +58,10 @@ class TestRepeatInterleaveOp(OpTest):
self.index_size = self.x_shape[self.dim] self.index_size = self.x_shape[self.dim]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestRepeatInterleaveOp2(OpTest): class TestRepeatInterleaveOp2(OpTest):
...@@ -96,10 +96,10 @@ class TestRepeatInterleaveOp2(OpTest): ...@@ -96,10 +96,10 @@ class TestRepeatInterleaveOp2(OpTest):
self.index_size = self.x_shape[self.dim] self.index_size = self.x_shape[self.dim]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestIndexSelectAPI(unittest.TestCase): class TestIndexSelectAPI(unittest.TestCase):
......
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import nms from test_multiclass_nms_op import nms
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -37,10 +37,10 @@ class TestReverseOp(OpTest): ...@@ -37,10 +37,10 @@ class TestReverseOp(OpTest):
self.outputs = {'Out': out} self.outputs = {'Out': out}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestCase0(TestReverseOp): class TestCase0(TestReverseOp):
......
...@@ -17,7 +17,7 @@ import sys ...@@ -17,7 +17,7 @@ import sys
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf) ...@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static() paddle.enable_static()
def rnn_wrapper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
PreState,
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestRNNOp(OpTest): class TestRNNOp(OpTest):
def get_weight_names(self): def get_weight_names(self):
weight_names = [] weight_names = []
...@@ -44,6 +76,9 @@ class TestRNNOp(OpTest): ...@@ -44,6 +76,9 @@ class TestRNNOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "rnn" self.op_type = "rnn"
self.python_api = rnn_wrapper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]}
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
self.sequence_length = ( self.sequence_length = (
None None
......
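The rnn test gains a python_api wrapper plus output-signature hints, so eager_op_test can invoke paddle._C_ops.rnn directly and match its return values against the op outputs (python_out_sig names the outputs to compare; python_out_sig_sub_name splits the duplicable "State" output). A minimal sketch of the same hooks on a simpler op, with concat as a stand-in:

    import numpy as np

    import paddle
    from eager_op_test import OpTest

    class TestConcatSig(OpTest):
        def setUp(self):
            self.op_type = "concat"
            self.python_api = paddle.concat  # called with inputs in op order
            self.python_out_sig = ["Out"]  # output names to fetch and compare
            x0 = np.random.rand(2, 3).astype("float64")
            x1 = np.random.rand(2, 3).astype("float64")
            self.inputs = {'X': [('x0', x0), ('x1', x1)]}
            self.attrs = {'axis': 0}
            self.outputs = {'Out': np.concatenate((x0, x1), axis=0)}

        def test_check_output(self):
            self.check_output()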
...@@ -16,7 +16,7 @@ import math ...@@ -16,7 +16,7 @@ import math
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -233,10 +233,10 @@ class TestROIAlignOp(OpTest): ...@@ -233,10 +233,10 @@ class TestROIAlignOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestROIAlignInLodOp(TestROIAlignOp): class TestROIAlignInLodOp(TestROIAlignOp):
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
from decimal import ROUND_HALF_UP, Decimal from decimal import ROUND_HALF_UP, Decimal
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
...@@ -170,10 +170,10 @@ class TestROIPoolOp(OpTest): ...@@ -170,10 +170,10 @@ class TestROIPoolOp(OpTest):
self.set_data() self.set_data()
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestROIPoolInLodOp(TestROIPoolOp): class TestROIPoolInLodOp(TestROIPoolOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -47,10 +47,10 @@ class TestRollOp(OpTest): ...@@ -47,10 +47,10 @@ class TestRollOp(OpTest):
self.axis = [0, -2] self.axis = [0, -2]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class TestRollOpCase2(TestRollOp): class TestRollOpCase2(TestRollOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
from paddle import fluid from paddle import fluid
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import ( from test_generate_proposal_labels_op import (
_bbox_overlaps, _bbox_overlaps,
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -345,10 +345,10 @@ class RReluTest(OpTest): ...@@ -345,10 +345,10 @@ class RReluTest(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=['Noise'], check_eager=True) self.check_output(no_check_set=['Noise'])
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True) self.check_grad(['X'], 'Out')
class RReluTrainingTest(RReluTest): class RReluTrainingTest(RReluTest):
......
...@@ -16,7 +16,7 @@ import collections ...@@ -16,7 +16,7 @@ import collections
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
class TestSampleLogitsOp(OpTest): class TestSampleLogitsOp(OpTest):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -91,10 +91,10 @@ class TestScatterNdAddSimpleOp(OpTest): ...@@ -91,10 +91,10 @@ class TestScatterNdAddSimpleOp(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp): class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp):
...@@ -122,14 +122,12 @@ class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp): ...@@ -122,14 +122,12 @@ class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithEmptyIndex(OpTest): class TestScatterNdAddWithEmptyIndex(OpTest):
...@@ -165,10 +163,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest): ...@@ -165,10 +163,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex): class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex):
...@@ -196,14 +194,12 @@ class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex): ...@@ -196,14 +194,12 @@ class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithHighRankSame(OpTest): class TestScatterNdAddWithHighRankSame(OpTest):
...@@ -242,10 +238,10 @@ class TestScatterNdAddWithHighRankSame(OpTest): ...@@ -242,10 +238,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
self.dtype = np.float64 self.dtype = np.float64
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame): class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame):
...@@ -273,14 +269,12 @@ class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame): ...@@ -273,14 +269,12 @@ class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=True
)
class TestScatterNdAddWithHighRankDiff(OpTest): class TestScatterNdAddWithHighRankDiff(OpTest):
...@@ -303,10 +297,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest): ...@@ -303,10 +297,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
self.outputs = {'Out': expect_np} self.outputs = {'Out': expect_np}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True) self.check_grad(['X', 'Updates'], 'Out')
# Test Python API # Test Python API
......
...@@ -16,7 +16,7 @@ import os ...@@ -16,7 +16,7 @@ import os
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
...@@ -46,10 +46,10 @@ class TestScatterOp(OpTest): ...@@ -46,10 +46,10 @@ class TestScatterOp(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op(TestScatterOp): class TestScatterFP16Op(TestScatterOp):
...@@ -69,14 +69,12 @@ class TestScatterBF16Op(TestScatterOp): ...@@ -69,14 +69,12 @@ class TestScatterBF16Op(TestScatterOp):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterOp0(OpTest): class TestScatterOp0(OpTest):
...@@ -102,10 +100,10 @@ class TestScatterOp0(OpTest): ...@@ -102,10 +100,10 @@ class TestScatterOp0(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op0(TestScatterOp0): class TestScatterFP16Op0(TestScatterOp0):
...@@ -125,14 +123,12 @@ class TestScatterBF16Op0(TestScatterOp0): ...@@ -125,14 +123,12 @@ class TestScatterBF16Op0(TestScatterOp0):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterOp1(OpTest): class TestScatterOp1(OpTest):
...@@ -161,10 +157,10 @@ class TestScatterOp1(OpTest): ...@@ -161,10 +157,10 @@ class TestScatterOp1(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op1(TestScatterOp1): class TestScatterFP16Op1(TestScatterOp1):
...@@ -184,14 +180,12 @@ class TestScatterBF16Op1(TestScatterOp1): ...@@ -184,14 +180,12 @@ class TestScatterBF16Op1(TestScatterOp1):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -221,14 +215,12 @@ class TestScatterOp2(OpTest): ...@@ -221,14 +215,12 @@ class TestScatterOp2(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -280,14 +272,12 @@ class TestScatterOp3(OpTest): ...@@ -280,14 +272,12 @@ class TestScatterOp3(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -330,10 +320,10 @@ class TestScatterOp4(OpTest): ...@@ -330,10 +320,10 @@ class TestScatterOp4(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=False) self.check_grad(['X', 'Updates'], 'Out')
class TestScatterFP16Op4(TestScatterOp4): class TestScatterFP16Op4(TestScatterOp4):
...@@ -353,14 +343,12 @@ class TestScatterBF16Op4(TestScatterOp4): ...@@ -353,14 +343,12 @@ class TestScatterBF16Op4(TestScatterOp4):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -390,14 +378,12 @@ class TestScatterOp5(OpTest): ...@@ -390,14 +378,12 @@ class TestScatterOp5(OpTest):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False) self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
@unittest.skipIf( @unittest.skipIf(
...@@ -440,10 +426,10 @@ class TestScatterOp6(OpTest): ...@@ -440,10 +426,10 @@ class TestScatterOp6(OpTest):
self.dtype = np.float32 self.dtype = np.float32
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=False) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False) self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op6(TestScatterOp6): class TestScatterFP16Op6(TestScatterOp6):
...@@ -463,14 +449,12 @@ class TestScatterBF16Op6(TestScatterOp6): ...@@ -463,14 +449,12 @@ class TestScatterBF16Op6(TestScatterOp6):
def test_check_output(self): def test_check_output(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False) self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
place, ['X', 'Updates'], 'Out', check_eager=False
)
class TestScatterAPI(unittest.TestCase): class TestScatterAPI(unittest.TestCase):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -42,7 +42,7 @@ class TestSearchSorted(OpTest): ...@@ -42,7 +42,7 @@ class TestSearchSorted(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def init_test_case(self): def init_test_case(self):
self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32") self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32")
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.static as static import paddle.static as static
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
...@@ -116,10 +116,10 @@ class TestSegmentOps(OpTest): ...@@ -116,10 +116,10 @@ class TestSegmentOps(OpTest):
self.outputs = {'Out': result.astype(self.dtype)} self.outputs = {'Out': result.astype(self.dtype)}
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True) self.check_grad(["X"], "Out")
class TestSegmentSum2(TestSegmentOps): class TestSegmentSum2(TestSegmentOps):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
from functools import reduce from functools import reduce
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest, convert_float_to_uint16 from eager_op_test import OpTest, convert_float_to_uint16
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
...@@ -36,7 +36,7 @@ class TestShapeOp(OpTest): ...@@ -36,7 +36,7 @@ class TestShapeOp(OpTest):
self.shape = [2, 3] self.shape = [2, 3]
def test_check_output(self): def test_check_output(self):
self.check_output(check_eager=True) self.check_output()
class case1(TestShapeOp): class case1(TestShapeOp):
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from eager_op_test import OpTest
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
......
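Every hunk above applies the same mechanical migration: the test imports OpTest from eager_op_test instead of the legacy op_test module, and the retired check_eager keyword is dropped from check_output, check_grad, check_output_with_place, and check_grad_with_place. A minimal sketch of a migrated test, assuming eager_op_test exposes the same OpTest interface as op_test minus the check_eager switch (the class name and tensor shapes below are illustrative, not taken verbatim from the diff):

import numpy as np
from eager_op_test import OpTest  # was: from op_test import OpTest


class TestScatterOpMigrated(OpTest):  # hypothetical name, for illustration
    def setUp(self):
        self.op_type = "scatter"
        ref_np = np.ones((3, 50)).astype("float32")
        index_np = np.array([1, 2]).astype("int32")
        updates_np = np.random.random((2, 50)).astype("float32")
        output_np = np.copy(ref_np)
        output_np[index_np] = updates_np
        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        # was: self.check_output(check_eager=False)
        self.check_output()

    def test_check_grad(self):
        # was: self.check_grad(["X", "Updates"], "Out", check_eager=False)
        self.check_grad(["X", "Updates"], "Out")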