Unverified · Commit c9e6c8ce authored by 姜永久, committed by GitHub

Yj/dynamic upgrade part5 (#50927)

* fix accuracy and activation
* add adadelta
* support pad2d
* support pad
* modify exponential and linear_interp_v2
* modify meshgrid test
* add group_norm
* support some ops
* modify activation & group norm
* modify activation
* reset group_norm
* modify activation
* modify pow test
* modify activation
* lint
* modify mkldnn test
* fix activation
* modify adadelta
* lint
* fix activation mkldnn
* reset activation
Parent commit: e397a3ff
@@ -18,7 +18,9 @@ import numpy as np
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
 from scipy.special import expit

+import paddle
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 from paddle.fluid.tests.unittests.test_activation_op import (
     TestAbs,
@@ -63,10 +65,19 @@ class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 class TestMKLDNNGeluDim2(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
@@ -80,6 +91,7 @@ class TestMKLDNNGeluDim2(TestActivation):
 class TestMKLDNNGeluDim2Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
@@ -175,10 +187,19 @@ class TestMKLDNNLeakyReluDim4(TestLeakyRelu):
     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 class TestMKLDNNGeluDim4(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -192,6 +213,7 @@ class TestMKLDNNGeluDim4(TestActivation):
 class TestMKLDNNGeluDim4Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.float32

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -208,6 +230,7 @@ class TestMKLDNNGeluDim4Approx(TestActivation):
 class TestMKLDNNGeluBf16Dim4(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.uint16

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
@@ -230,6 +253,7 @@ class TestMKLDNNGeluBf16Dim4(TestActivation):
 class TestMKLDNNGeluBf16Dim4Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
+        self.python_api = F.gelu
         self.dtype = np.uint16

         x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
@@ -294,11 +318,18 @@ class TestMKLDNNSwishDim4(TestSwish):
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
         self.attrs = {"use_mkldnn": True, "beta": beta}
-        self.check_eager = False

     def init_dtype(self):
         self.dtype = np.float32

+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_dygraph=False)
+

 def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
     x_dtype = x.dtype
@@ -334,6 +365,7 @@ class TestMKLDNNHardSwishDim4(TestHardSwish):
 class TestMKLDNNMish(TestActivation):
     def setUp(self):
         self.op_type = "mish"
+        self.python_api = F.mish
         self.dtype = np.float32

         x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
@@ -347,7 +379,7 @@ class TestMKLDNNMish(TestActivation):
 class TestMKLDNNRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
+        self.python_api = paddle.round
         x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32)
         out = np.round(x)
@@ -370,6 +402,7 @@ class TestMKLDNNSigmoidDim4(TestSigmoid):
 class TestMKLDNNEluDefaultAlpha(TestActivation):
     def setUp(self):
         self.op_type = "elu"
+        self.python_api = F.elu
         self.set_alpha()

         x = np.random.random((5, 5, 4)).astype("float32")
@@ -393,6 +426,7 @@ class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha):
 class TestMKLDNNExpOp(TestActivation):
     def setUp(self):
         self.op_type = "exp"
+        self.python_api = paddle.exp
         x = np.random.random((5, 5, 4)).astype("float32")

         self.inputs = {'X': x}
@@ -407,6 +441,7 @@ class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
         np.random.seed(123)
         self.op_type = 'abs'
+        self.python_api = paddle.abs
         self.x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)
         self.out = np.abs(self.x)
         self.out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
...
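Setting `python_api` is what lets the eager OpTest replay each case through the dygraph front end and compare it with the static-graph result. A minimal sketch of that comparison for the gelu cases above; the reference helper and the tolerances are ours, not OpTest internals:

import numpy as np
from scipy.special import erf

import paddle
import paddle.nn.functional as F


def ref_gelu(x):
    # Standard erf-based (exact) gelu used as a numpy reference.
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))


x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
out = F.gelu(paddle.to_tensor(x))  # the call OpTest issues via python_api
np.testing.assert_allclose(out.numpy(), ref_gelu(x), rtol=1e-5, atol=1e-6)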
@@ -15,16 +15,21 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard


+def accuracy_wrapper(infer, indices, label):
+    return paddle._C_ops.accuracy(infer, indices, label)
+
+
 class TestAccuracyOp(OpTest):
     def setUp(self):
         self.op_type = "accuracy"
+        self.python_api = accuracy_wrapper
         self.dtype = np.float32
         self.init_dtype()
         n = 8192
...
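The raw `accuracy` kernel consumes the values and indices produced by a top-k pass plus the ground-truth labels, which is why `accuracy_wrapper` takes three tensors. For a dygraph sanity check, the public `paddle.metric.accuracy` wraps the same computation; the shapes below are illustrative:

import numpy as np

import paddle

pred = paddle.to_tensor(np.random.rand(8, 10).astype('float32'))
label = paddle.to_tensor(np.random.randint(0, 10, (8, 1)).astype('int64'))

# Top-1 match rate between predictions and labels, a scalar in [0, 1].
acc = paddle.metric.accuracy(input=pred, label=label, k=1)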
@@ -15,15 +15,39 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid


+def adadelta_wrapper(
+    Param,
+    Grad,
+    AvgSquaredGrad,
+    AvgSquaredUpdate,
+    master_weight=None,
+    rho=0.95,
+    epsilon=1e-6,
+):
+    paddle._C_ops.adadelta_(
+        Param,
+        Grad,
+        AvgSquaredGrad,
+        AvgSquaredUpdate,
+        None,
+        rho,
+        epsilon,
+        False,
+    )
+    return Param, AvgSquaredGrad, AvgSquaredUpdate
+
+
 class TestAdadeltaOp1(OpTest):
     def setUp(self):
         self.op_type = "adadelta"
+        self.python_api = adadelta_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         # The squared gradient is positive
@@ -76,6 +100,8 @@ class TestAdadeltaOp2(OpTest):
     def setUp(self):
         self.op_type = "adadelta"
+        self.python_api = adadelta_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         # The squared gradient is positive
@@ -86,6 +112,8 @@ class TestAdadeltaOp2(OpTest):
         rho = 0.95
         epsilon = 1e-6

+        self.attrs = {'rho': rho, 'epsilon': epsilon}
+
         self.inputs = {
             'Param': param,
             'Grad': grad,
...
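For reference, the expected outputs these cases assert encode the standard Adadelta recurrence (Zeiler, 2012). A numpy sketch with the same rho/epsilon defaults as the wrapper; the helper name is ours:

import numpy as np


def adadelta_step(param, grad, avg_sq_grad, avg_sq_update, rho=0.95, epsilon=1e-6):
    # Decaying average of squared gradients.
    avg_sq_grad = rho * avg_sq_grad + (1 - rho) * grad**2
    # Step scaled by the ratio RMS(update) / RMS(grad).
    update = np.sqrt(avg_sq_update + epsilon) / np.sqrt(avg_sq_grad + epsilon) * grad
    # Decaying average of squared updates.
    avg_sq_update = rho * avg_sq_update + (1 - rho) * update**2
    return param - update, avg_sq_grad, avg_sq_update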
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid.core as core
@@ -108,8 +108,8 @@ class TestBilinearInterpOp(OpTest):
         self.init_test_case()
         self.op_type = "bilinear_interp"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
+        # Skip check_dygraph while found them in Inputs.
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")

         if self.data_layout == "NCHW":
@@ -139,10 +139,10 @@ class TestBilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
@@ -156,11 +156,11 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -285,7 +285,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
@@ -309,7 +309,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs = {
             'out_h': self.out_h,
@@ -323,7 +323,7 @@ class TestBilinearInterpOpUint8(OpTest):
     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=self.check_eager
+            place=core.CPUPlace(), atol=1, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -427,7 +427,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "bilinear_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -450,7 +450,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
@@ -458,7 +458,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
                 ("x" + str(index), np.ones((1)).astype('int32') * ele)
             )
             self.inputs['SizeTensor'] = size_tensor
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -473,11 +473,11 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
...
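What these cases ultimately verify is a bilinear resize; the per-case `check_dygraph` flag only decides whether the dygraph path is exercised too. A compact numpy reference for the `align_corners=True` mapping on an NCHW tensor (our own helper, independent of the test file's):

import numpy as np


def bilinear_resize_nchw(x, out_h, out_w):
    # align_corners=True maps output row i to source row i * (H-1)/(out_h-1).
    n, c, h, w = x.shape
    out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            sy = i * (h - 1) / max(out_h - 1, 1)
            sx = j * (w - 1) / max(out_w - 1, 1)
            y0, x0 = int(sy), int(sx)
            y1, x1 = min(y0 + 1, h - 1), min(x0 + 1, w - 1)
            dy, dx = sy - y0, sx - x0
            out[:, :, i, j] = (
                x[:, :, y0, x0] * (1 - dy) * (1 - dx)
                + x[:, :, y0, x1] * (1 - dy) * dx
                + x[:, :, y1, x0] * dy * (1 - dx)
                + x[:, :, y1, x1] * dy * dx
            )
    return out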
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid as fluid
@@ -39,10 +39,10 @@ class TestExpandOpRank1(OpTest):
         self.expand_times = [2]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_Corner(TestExpandOpRank1):
@@ -104,10 +104,10 @@ class TestExpandOpRank1_tensor_attr(OpTest):
         self.infer_expand_times = [-1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_Corner_tensor_attr(TestExpandOpRank1_tensor_attr):
@@ -146,10 +146,10 @@ class TestExpandOpRank1_tensor(OpTest):
         self.expand_times = [2]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestExpandOpRank2_tensor(TestExpandOpRank1_tensor):
@@ -170,7 +170,7 @@ class TestExpandOpInteger(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 # Situation 5: input x is Bool
@@ -183,7 +183,7 @@ class TestExpandOpBoolean(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 # Situation 56: input x is Integer
@@ -198,7 +198,7 @@ class TestExpandOpInt64_t(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == "__main__":
...
@@ -26,6 +26,7 @@ class TestExponentialOp1(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "exponential"
+        self.python_api = paddle.tensor.exponential_
         self.config()

         self.attrs = {"lambda": self.lam}
@@ -55,6 +56,7 @@ class TestExponentialOp1(OpTest):
         self.check_grad(
             ['X'],
             'Out',
+            in_place=True,
             user_defined_grads=[np.zeros([1024, 1024], dtype=self.dtype)],
             user_defined_grad_outputs=[
                 np.random.rand(1024, 1024).astype(self.dtype)
...
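`exponential_` is an in-place sampler, which is also why the grad check above pins `user_defined_grads` to zeros: the random draw does not propagate gradient back to the input. A quick dygraph sketch, assuming the in-place `Tensor.exponential_(lam)` form; the lam value is illustrative:

import paddle

x = paddle.full([1024, 1024], 1.0, dtype='float32')
x.exponential_(lam=0.5)   # refills x in place with Exponential(0.5) samples
print(float(x.mean()))    # should be close to 1 / lam = 2.0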
@@ -16,7 +16,7 @@ import platform
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -121,12 +121,12 @@ class TestLinearInterpOp(OpTest):
     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7)
+            self.check_output(atol=1e-7, check_dygraph=False)
         else:
-            self.check_output(atol=1e-5)
+            self.check_output(atol=1e-5, check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'linear'
@@ -304,9 +304,13 @@ class TestResizeLinearOpUint8(OpTest):
     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output_with_place(place=core.CPUPlace(), atol=1e-7)
+            self.check_output_with_place(
+                place=core.CPUPlace(), atol=1e-7, check_dygraph=False
+            )
         else:
-            self.check_output_with_place(place=core.CPUPlace(), atol=1e-5)
+            self.check_output_with_place(
+                place=core.CPUPlace(), atol=1e-5, check_dygraph=False
+            )

     def init_test_case(self):
         self.interp_method = 'linear'
...
@@ -16,7 +16,7 @@ import platform
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -178,12 +178,12 @@ class TestLinearInterpOp(OpTest):
     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7, check_eager=True)
+            self.check_output(atol=1e-7)
         else:
-            self.check_output(atol=1e-5, check_eager=True)
+            self.check_output(atol=1e-5)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)

     def init_test_case(self):
         self.interp_method = 'linear'
@@ -340,6 +340,7 @@ class TestResizeLinearOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "linear_interp_v2"
+        self.python_api = linear_interp_test
         input_np = np.random.random(self.input_shape).astype("uint8")

         if self.scale > 0:
...
@@ -15,21 +15,27 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid


+def meshgrid_wrapper(x):
+    return paddle.tensor.meshgrid(x[0], x[1])
+
+
 class TestMeshgridOp(OpTest):
     def setUp(self):
         self.op_type = "meshgrid"
+        self.python_api = meshgrid_wrapper
         self.dtype = self.get_dtype()
         ins, outs = self.init_test_data()
         self.inputs = {'X': [('x%d' % i, ins[i]) for i in range(len(ins))]}
         self.outputs = {
             'Out': [('out%d' % i, outs[i]) for i in range(len(outs))]
         }
+        self.python_out_sig = ['out0', 'out1']

     def get_dtype(self):
         return "float64"
@@ -38,8 +44,7 @@ class TestMeshgridOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['x0'], ['out0'])
-        self.check_grad(['x1'], ['out1'])
+        self.check_grad(['x0'], ['out0', 'out1'])

     def init_test_data(self):
         self.shape = self.get_x_shape()
...
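`meshgrid_wrapper` unpacks the test's list-typed `X` input into positional arguments; `paddle.meshgrid` itself turns two 1-D tensors into a pair of broadcast grids:

import paddle

x = paddle.arange(3, dtype='float64')
y = paddle.arange(4, dtype='float64')
gx, gy = paddle.meshgrid(x, y)  # both outputs have shape [3, 4]
# gx[i, j] == x[i] and gy[i, j] == y[j]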
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid.core as core
@@ -80,7 +80,7 @@ class TestNearestInterpOp(OpTest):
         self.data_layout = 'NCHW'
         self.init_test_case()
         self.op_type = "nearest_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")

         if self.data_layout == "NCHW":
@@ -109,10 +109,10 @@ class TestNearestInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
+            self.check_dygraph = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -124,11 +124,11 @@ class TestNearestInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -243,7 +243,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "nearest_interp"
-        self.check_eager = True
+        self.check_dygraph = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
@@ -266,7 +266,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
@@ -278,7 +278,7 @@ class TestNearestInterpOpUint8(OpTest):
     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=self.check_eager
+            place=core.CPUPlace(), atol=1, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
@@ -362,8 +362,8 @@ class TestNearestInterpOp_attr_tensor(OpTest):
             'align_corners': self.align_corners,
         }
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
+        # Skip check_dygraph while found them in Inputs.
+        self.check_dygraph = True
         input_np = np.random.random(self.input_shape).astype("float64")
         self.inputs = {'X': input_np}
@@ -380,7 +380,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
+            self.check_dygraph = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
@@ -388,7 +388,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
                 ("x" + str(index), np.ones((1)).astype('int32') * ele)
             )
             self.inputs['SizeTensor'] = size_tensor
-            self.check_eager = False
+            self.check_dygraph = False

         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
@@ -403,11 +403,11 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=self.check_dygraph)

     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
+            ['X'], 'Out', in_place=True, check_dygraph=self.check_dygraph
         )

     def init_test_case(self):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestPad2dOp(OpTest):
@@ -64,10 +64,10 @@ class TestPad2dOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)

     def initTestCase(self):
         self.shape = (2, 3, 4, 5)
...
@@ -16,7 +16,7 @@ import os
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_attribute_var import UnittestBase

 import paddle
@@ -25,11 +25,16 @@ import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard


+def pad_wrapper(x, paddings, pad_value):
+    return paddle._C_ops.pad(x, paddings, float(pad_value))
+
+
 class TestPadOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.dtype = self.get_dtype()
         self.op_type = "pad"
+        self.python_api = pad_wrapper
         self.inputs = {
             'X': np.random.random(self.shape).astype(self.dtype),
         }
...
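The `pad` kernel takes a flat `paddings` list with two entries (before, after) per axis, while numpy expresses the same thing as per-axis pairs; converting between the two is how an expected output can be built. A small sketch with an illustrative pad value:

import numpy as np

x = np.random.random((2, 3)).astype('float32')
paddings = [0, 1, 2, 0]  # flat layout: (before0, after0, before1, after1)
pairs = list(zip(paddings[0::2], paddings[1::2]))
expected = np.pad(x, pairs, mode='constant', constant_values=0.0)  # shape (3, 5)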