From c9e6c8ce88a2083a99f575ee9e04a0d142cbd2c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A7=9C=E6=B0=B8=E4=B9=85?= <34344716+yjjiang11@users.noreply.github.com>
Date: Thu, 9 Mar 2023 15:47:58 +0800
Subject: [PATCH] Yj/dynamic upgrade part5 (#50927)

* fix accuracy and activation
* add adadelta
* support pad2d
* support pad
* modify exponential and linear_interp_v2
* modify meshgrid test
* add group_norm
* support some ops
* modify activation & group norm
* modify activation
* reset group_norm
* modify activation
* modify pow test
* modify activation
* lint
* modify mkldnn test
* fix activation
* modify adadelta
* lint
* fix activation mkldnn
* reset activation
---
 .../mkldnn/test_activation_mkldnn_op.py       | 39 ++++++++++++++++++-
 .../fluid/tests/unittests/test_accuracy_op.py |  7 +++-
 .../fluid/tests/unittests/test_adadelta_op.py | 30 +++++++++++++-
 .../unittests/test_bilinear_interp_op.py      | 30 +++++++-------
 .../fluid/tests/unittests/test_expand_op.py   | 20 +++++-----
 .../tests/unittests/test_exponential_op.py    |  2 +
 .../tests/unittests/test_linear_interp_op.py  | 16 +++++---
 .../unittests/test_linear_interp_v2_op.py     |  9 +++--
 .../fluid/tests/unittests/test_meshgrid_op.py | 11 ++++--
 .../tests/unittests/test_nearest_interp_op.py | 30 +++++++-------
 .../fluid/tests/unittests/test_pad2d_op.py    |  6 +--
 .../fluid/tests/unittests/test_pad_op.py      |  7 +++-
 12 files changed, 146 insertions(+), 61 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
index 0ba5d7c2209..e586bdbbf24 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
@@ -18,7 +18,9 @@ import numpy as np
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
 from scipy.special import expit

+import paddle
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 from paddle.fluid.tests.unittests.test_activation_op import (
     TestAbs,
@@ -63,10 +65,19 @@ class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 class TestMKLDNNGeluDim2(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
@@ -80,6 +91,7 @@ class TestMKLDNNGeluDim2(TestActivation):
 class TestMKLDNNGeluDim2Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
@@ -175,10 +187,19 @@ class TestMKLDNNLeakyReluDim4(TestLeakyRelu):
     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 class TestMKLDNNGeluDim4(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -192,6 +213,7 @@ class TestMKLDNNGeluDim4(TestActivation):
 class TestMKLDNNGeluDim4Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32
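Note: the python_api hook added throughout this file is what the op-test harness uses to run the same computation through the dygraph/eager API and compare it against the compiled kernel. A rough standalone sketch of what that cross-check amounts to for the gelu cases (illustrative only; harness internals and tolerances here are assumptions, not taken from this patch):

    import numpy as np
    from scipy.special import erf

    import paddle
    import paddle.nn.functional as F

    x_np = np.random.uniform(-1, 1, [11, 17]).astype("float32")
    eager_out = F.gelu(paddle.to_tensor(x_np))            # dygraph path
    # NumPy reference: the standard (non-approximate) erf-based gelu.
    ref = 0.5 * x_np * (1.0 + erf(x_np / np.sqrt(2.0)))
    np.testing.assert_allclose(eager_out.numpy(), ref, rtol=1e-5, atol=1e-6)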

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -208,6 +230,7 @@ class TestMKLDNNGeluDim4Approx(TestActivation):
 class TestMKLDNNGeluBf16Dim4(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.uint16

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
@@ -230,6 +253,7 @@ class TestMKLDNNGeluBf16Dim4(TestActivation):
 class TestMKLDNNGeluBf16Dim4Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.uint16

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
@@ -294,11 +318,18 @@ class TestMKLDNNSwishDim4(TestSwish):
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
         self.attrs = {"use_mkldnn": True, "beta": beta}
-        self.check_eager = False

     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
     x_dtype = x.dtype
@@ -334,6 +365,7 @@ class TestMKLDNNHardSwishDim4(TestHardSwish):
 class TestMKLDNNMish(TestActivation):
     def setUp(self):
         self.op_type = "mish"
+        self.python_api = F.mish
         self.dtype = np.float32

         x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -347,7 +379,7 @@ class TestMKLDNNMish(TestActivation):
 class TestMKLDNNRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
-
+        self.python_api = paddle.round
         x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32)
         out = np.round(x)

@@ -370,6 +402,7 @@ class TestMKLDNNSigmoidDim4(TestSigmoid):
 class TestMKLDNNEluDefaultAlpha(TestActivation):
     def setUp(self):
         self.op_type = "elu"
+        self.python_api = F.elu
         self.set_alpha()

         x = np.random.random((5, 5, 4)).astype("float32")
@@ -393,6 +426,7 @@ class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha):
 class TestMKLDNNExpOp(TestActivation):
     def setUp(self):
         self.op_type = "exp"
+        self.python_api = paddle.exp
         x = np.random.random((5, 5, 4)).astype("float32")

         self.inputs = {'X': x}
@@ -407,6 +441,7 @@ class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
         np.random.seed(123)

         self.op_type = 'abs'
+        self.python_api = paddle.abs
         self.x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)
         self.out = np.abs(self.x)
         self.out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
index 5627ead0a6b..b6f99020ea8 100755
--- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
@@ -15,16 +15,21 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard


+def accuracy_wrapper(infer, indices, label):
+    return paddle._C_ops.accuracy(infer, indices, label)
+
+
 class TestAccuracyOp(OpTest):
     def setUp(self):
         self.op_type = "accuracy"
+        self.python_api = accuracy_wrapper
         self.dtype = np.float32
         self.init_dtype()
         n = 8192
diff --git a/python/paddle/fluid/tests/unittests/test_adadelta_op.py b/python/paddle/fluid/tests/unittests/test_adadelta_op.py
index 699bf5df564..40074916054 100644
--- a/python/paddle/fluid/tests/unittests/test_adadelta_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adadelta_op.py
@@ -15,15 +15,39 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid


+def adadelta_wrapper(
+    Param,
+    Grad,
+    AvgSquaredGrad,
+    AvgSquaredUpdate,
+    master_weight=None,
+    rho=0.95,
+    epsilon=1e-6,
+):
+    paddle._C_ops.adadelta_(
+        Param,
+        Grad,
+        AvgSquaredGrad,
+        AvgSquaredUpdate,
+        None,
+        rho,
+        epsilon,
+        False,
+    )
+    return Param, AvgSquaredGrad, AvgSquaredUpdate
+
+
 class TestAdadeltaOp1(OpTest):
     def setUp(self):
         self.op_type = "adadelta"
+        self.python_api = adadelta_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         # The squared gradient is positive
@@ -76,6 +100,8 @@ class TestAdadeltaOp2(OpTest):

     def setUp(self):
         self.op_type = "adadelta"
+        self.python_api = adadelta_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         # The squared gradient is positive
@@ -86,6 +112,8 @@ class TestAdadeltaOp2(OpTest):
         rho = 0.95
         epsilon = 1e-6

+        self.attrs = {'rho': rho, 'epsilon': epsilon}
+
         self.inputs = {
             'Param': param,
             'Grad': grad,
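Note: adadelta_wrapper returns the parameter and moment tensors explicitly because paddle._C_ops.adadelta_ updates its inputs in place (hence the trailing underscore), and python_out_sig tells the harness which output names to match against those return values. For reference, the expected values these tests compare against follow the standard Adadelta step; a NumPy sketch (written from the published algorithm, not copied from the kernel):

    import numpy as np

    def adadelta_step(param, grad, avg_sq_grad, avg_sq_update,
                      rho=0.95, epsilon=1e-6):
        # Accumulate the decaying average of squared gradients.
        avg_sq_grad = rho * avg_sq_grad + (1 - rho) * grad * grad
        # Scale the gradient by the ratio of RMS update to RMS gradient.
        update = -np.sqrt(
            (avg_sq_update + epsilon) / (avg_sq_grad + epsilon)
        ) * grad
        # Accumulate the decaying average of squared updates.
        avg_sq_update = rho * avg_sq_update + (1 - rho) * update * update
        return param + update, avg_sq_grad, avg_sq_update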
diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
index fd2372e2571..340d89b3607 100755
--- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid.core as core
@@ -108,8 +108,8 @@ class TestBilinearInterpOp(OpTest):
         self.init_test_case()
         self.op_type = "bilinear_interp"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
+        # Skip check_dygraph if they are found in Inputs.
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")

         if self.data_layout == "NCHW":
@@ -139,10 +139,10 @@ class TestBilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
@@ -156,11 +156,11 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -285,7 +285,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
@@ -309,7 +309,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
@@ -323,7 +323,7 @@ class TestBilinearInterpOpUint8(OpTest):

     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=self.check_eager
+            place=core.CPUPlace(), atol=1, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -427,7 +427,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -450,7 +450,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):

         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
@@ -458,7 +458,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
                 size_tensor.append(
                     ("x" + str(index), np.ones((1)).astype('int32') * ele)
                 )
             self.inputs['SizeTensor'] = size_tensor
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -473,11 +473,11 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
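Note: check_dygraph flips to False whenever the OutSize/SizeTensor inputs are fed because those dispensable tensor inputs have no counterpart in the dygraph Python signature, so no equivalent eager call can be constructed. A rough dygraph analogue of this op, assuming F.interpolate is the closest modern entry point (illustrative, not part of this patch):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 6, 6])  # NCHW input
    # `size` plays the role of out_h/out_w; there is no OutSize tensor here.
    y = F.interpolate(x, size=[12, 12], mode='bilinear', align_corners=True)
    print(y.shape)  # [2, 3, 12, 12]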
diff --git a/python/paddle/fluid/tests/unittests/test_expand_op.py b/python/paddle/fluid/tests/unittests/test_expand_op.py
index 71ba5483c0e..2ba02737739 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid as fluid

@@ -39,10 +39,10 @@ class TestExpandOpRank1(OpTest):
         self.expand_times = [2]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_Corner(TestExpandOpRank1):
@@ -104,10 +104,10 @@ class TestExpandOpRank1_tensor_attr(OpTest):
         self.infer_expand_times = [-1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_Corner_tensor_attr(TestExpandOpRank1_tensor_attr):
@@ -146,10 +146,10 @@ class TestExpandOpRank1_tensor(OpTest):
         self.expand_times = [2]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_tensor(TestExpandOpRank1_tensor):
@@ -170,7 +170,7 @@ class TestExpandOpInteger(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 # Situation 5: input x is Bool
@@ -183,7 +183,7 @@ class TestExpandOpBoolean(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 # Situation 56: input x is Integer
@@ -198,7 +198,7 @@ class TestExpandOpInt64_t(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == "__main__":
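Note: these tests pass check_dygraph=False across the board because the legacy expand op (driven by expand_times) has no eager Python API; its closest modern dygraph counterpart is paddle.tile (an assumption made here for illustration, not something this patch wires up):

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.random(12).astype("float64"))
    y = paddle.tile(x, repeat_times=[2])  # same effect as expand_times=[2]
    assert y.shape == [24]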
diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py
index cc0144a56a1..af2caa19c0e 100644
--- a/python/paddle/fluid/tests/unittests/test_exponential_op.py
+++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py
@@ -26,6 +26,7 @@ class TestExponentialOp1(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "exponential"
+        self.python_api = paddle.tensor.exponential_
         self.config()

         self.attrs = {"lambda": self.lam}
@@ -55,6 +56,7 @@ class TestExponentialOp1(OpTest):
         self.check_grad(
             ['X'],
             'Out',
+            in_place=True,
             user_defined_grads=[np.zeros([1024, 1024], dtype=self.dtype)],
             user_defined_grad_outputs=[
                 np.random.rand(1024, 1024).astype(self.dtype)
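Note: exponential_ is an in-place sampler (hence the trailing underscore), which is why the grad check above now passes in_place=True and keeps zero user_defined_grads -- random sampling is treated as non-differentiable with respect to X. A minimal usage sketch (illustrative call, not taken from this patch):

    import paddle

    paddle.seed(100)
    t = paddle.zeros([2, 3])
    t.exponential_(lam=0.5)  # refills t with Exponential(rate=0.5) samples
    print(t)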
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
index c05f55a3634..9dfb5391f4b 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
@@ -16,7 +16,7 @@ import platform
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -121,12 +121,12 @@ class TestLinearInterpOp(OpTest):

     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7)
+            self.check_output(atol=1e-7, check_dygraph=False)
         else:
-            self.check_output(atol=1e-5)
+            self.check_output(atol=1e-5, check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'linear'
@@ -304,9 +304,13 @@ class TestResizeLinearOpUint8(OpTest):

     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output_with_place(place=core.CPUPlace(), atol=1e-7)
+            self.check_output_with_place(
+                place=core.CPUPlace(), atol=1e-7, check_dygraph=False
+            )
         else:
-            self.check_output_with_place(place=core.CPUPlace(), atol=1e-5)
+            self.check_output_with_place(
+                place=core.CPUPlace(), atol=1e-5, check_dygraph=False
+            )

     def init_test_case(self):
         self.interp_method = 'linear'
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index a56419b81a1..fdf25267d17 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -16,7 +16,7 @@ import platform
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -178,12 +178,12 @@ class TestLinearInterpOp(OpTest):

     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7, check_eager=True)
+            self.check_output(atol=1e-7)
         else:
-            self.check_output(atol=1e-5, check_eager=True)
+            self.check_output(atol=1e-5)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)

     def init_test_case(self):
         self.interp_method = 'linear'
@@ -340,6 +340,7 @@ class TestResizeLinearOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "linear_interp_v2"
+        self.python_api = linear_interp_test
         input_np = np.random.random(self.input_shape).astype("uint8")

         if self.scale > 0:
diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
index 71ed7ad4aa5..d74d3ea4fbc 100644
--- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -15,21 +15,27 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid


+def meshgrid_wrapper(x):
+    return paddle.tensor.meshgrid(x[0], x[1])
+
+
 class TestMeshgridOp(OpTest):
     def setUp(self):
         self.op_type = "meshgrid"
+        self.python_api = meshgrid_wrapper
         self.dtype = self.get_dtype()
         ins, outs = self.init_test_data()
         self.inputs = {'X': [('x%d' % i, ins[i]) for i in range(len(ins))]}
         self.outputs = {
             'Out': [('out%d' % i, outs[i]) for i in range(len(outs))]
         }
+        self.python_out_sig = ['out0', 'out1']

     def get_dtype(self):
         return "float64"
@@ -38,8 +44,7 @@ class TestMeshgridOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['x0'], ['out0'])
-        self.check_grad(['x1'], ['out1'])
+        self.check_grad(['x0'], ['out0', 'out1'])

     def init_test_data(self):
         self.shape = self.get_x_shape()
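Note: meshgrid_wrapper unpacks OpTest's input list because paddle.tensor.meshgrid takes the 1-D tensors as separate arguments, and python_out_sig names the two outputs so the harness can match them to the wrapper's return values. A standalone sketch of the call being wrapped:

    import paddle

    a = paddle.to_tensor([1.0, 2.0, 3.0])
    b = paddle.to_tensor([4.0, 5.0])
    gx, gy = paddle.meshgrid(a, b)  # both grids broadcast to shape [3, 2]
    assert gx.shape == [3, 2] and gy.shape == [3, 2]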
diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
index b81ac851b5a..abe76af6fc0 100755
--- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid.core as core

@@ -80,7 +80,7 @@ class TestNearestInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "nearest_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")

         if self.data_layout == "NCHW":
@@ -109,10 +109,10 @@ class TestNearestInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
+            self.check_dygraph = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -124,11 +124,11 @@ class TestNearestInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -243,7 +243,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "nearest_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
@@ -266,7 +266,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -278,7 +278,7 @@ class TestNearestInterpOpUint8(OpTest):

     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=self.check_eager
+            place=core.CPUPlace(), atol=1, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -362,8 +362,8 @@ class TestNearestInterpOp_attr_tensor(OpTest):
             'align_corners': self.align_corners,
         }
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
+        # Skip check_dygraph if they are found in Inputs.
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")
         self.inputs = {'X': input_np}

@@ -380,7 +380,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):

         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
@@ -388,7 +388,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
                 size_tensor.append(
                     ("x" + str(index), np.ones((1)).astype('int32') * ele)
                 )
             self.inputs['SizeTensor'] = size_tensor
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -403,11 +403,11 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
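Note: for reference, a NumPy sketch of nearest-neighbor resizing with align_corners=True, the regime most of these cases exercise (written from the standard definition as an assumption, not copied from the test utility):

    import numpy as np

    def nearest_resize_nchw(x, out_h, out_w):
        n, c, h, w = x.shape
        ratio_h = (h - 1) / (out_h - 1) if out_h > 1 else 0.0
        ratio_w = (w - 1) / (out_w - 1) if out_w > 1 else 0.0
        # Round each output coordinate to its nearest source index.
        rows = np.floor(ratio_h * np.arange(out_h) + 0.5).astype(np.int64)
        cols = np.floor(ratio_w * np.arange(out_w) + 0.5).astype(np.int64)
        return x[:, :, rows][:, :, :, cols]

    y = nearest_resize_nchw(np.random.rand(2, 3, 4, 5), 8, 10)
    assert y.shape == (2, 3, 8, 10)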
diff --git a/python/paddle/fluid/tests/unittests/test_pad2d_op.py b/python/paddle/fluid/tests/unittests/test_pad2d_op.py
index 3f26961873a..9ceeb411444 100644
--- a/python/paddle/fluid/tests/unittests/test_pad2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad2d_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestPad2dOp(OpTest):
@@ -64,10 +64,10 @@ class TestPad2dOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)

     def initTestCase(self):
         self.shape = (2, 3, 4, 5)
diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py
index ee42ce1625f..93fb376ee70 100644
--- a/python/paddle/fluid/tests/unittests/test_pad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad_op.py
@@ -16,7 +16,7 @@ import os
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_attribute_var import UnittestBase

 import paddle
@@ -25,11 +25,16 @@
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard


+def pad_wrapper(x, paddings, pad_value):
+    return paddle._C_ops.pad(x, paddings, float(pad_value))
+
+
 class TestPadOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.dtype = self.get_dtype()
         self.op_type = "pad"
+        self.python_api = pad_wrapper
         self.inputs = {
             'X': np.random.random(self.shape).astype(self.dtype),
         }
--
GitLab
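Note: pad_wrapper adapts the C++ op's signature, where paddings is a flat [before_0, after_0, before_1, after_1, ...] list. A NumPy reference for what the op computes (illustrative shapes only):

    import numpy as np

    x = np.random.rand(2, 3).astype("float32")
    paddings = [0, 1, 2, 0]  # flat (before, after) pairs, one per axis
    pairs = list(zip(paddings[0::2], paddings[1::2]))
    ref = np.pad(x, pairs, mode="constant", constant_values=0.0)
    assert ref.shape == (3, 5)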