Unverified · Commit 47ddd36e · authored by 姜永久 · committed by GitHub

update ops for new dynamic graph tests (#50061)

* update elementwise ops tests
* add more ops
* modify sum&split
* lint
* rm check_dygraph
* revert pow
* modify add for cpu test
* revert reshape
* modify min

Parent 86a22ad4
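The commit moves these op tests off the legacy `op_test.OpTest` harness onto `eager_op_test.OpTest`: the separate `check_eager` flag disappears, `check_dygraph` becomes the single switch for the new dynamic graph path, and each test attaches a `python_api` so the harness can replay the op through a public Python call. A minimal sketch of the resulting test shape (the class below is illustrative, not part of the diff):

```python
# Minimal sketch of the post-migration pattern, assuming eager_op_test.py
# is on the test path (it is imported directly by the files below).
import numpy as np
import paddle
from eager_op_test import OpTest


class TestAddSketch(OpTest):  # hypothetical example, not from this commit
    def setUp(self):
        self.op_type = "elementwise_add"
        # python_api tells the harness which dygraph call computes the op
        self.python_api = paddle.add
        x = np.random.rand(13, 17).astype("float64")
        y = np.random.rand(13, 17).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        self.check_output()  # dygraph checking is on unless disabled

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')
```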
elementwise_add op tests:

```diff
@@ -15,15 +15,18 @@
 import unittest
 import numpy as np
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import (
-    OpTest,
-    convert_float_to_uint16,
-    skip_check_grad_ci,
-)
+
+
+def broadcast_wrapper(shape=[1, 10, 12, 1]):
+    def add_wrapper(x, y, axis=-1):
+        return x + y.reshape(shape)
+
+    return add_wrapper
 
 
 class TestElementwiseAddOp(OpTest):
@@ -45,14 +48,13 @@ class TestElementwiseAddOp(OpTest):
         self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
         self.outputs = {'Out': self.out}
 
-    def check_eager(self):
+    def check_dygraph(self):
         return not self.use_mkldnn and self.axis == -1
 
     def test_check_output(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
         self.check_output(
-            check_dygraph=(not self.use_mkldnn),
-            check_eager=self.check_eager(),
+            check_dygraph=self.check_dygraph(),
         )
 
     def test_check_grad_normal(self):
@@ -62,8 +64,7 @@ class TestElementwiseAddOp(OpTest):
         self.check_grad(
             ['X', 'Y'],
             'Out',
-            check_dygraph=(not self.use_mkldnn),
-            check_eager=self.check_eager(),
+            check_dygraph=self.check_dygraph(),
         )
 
     def test_check_grad_ingore_x(self):
@@ -74,8 +75,7 @@ class TestElementwiseAddOp(OpTest):
             ['Y'],
             'Out',
             no_grad_set=set("X"),
-            check_dygraph=(not self.use_mkldnn),
-            check_eager=self.check_eager(),
+            check_dygraph=self.check_dygraph(),
         )
 
     def test_check_grad_ingore_y(self):
@@ -86,8 +86,7 @@ class TestElementwiseAddOp(OpTest):
             ['X'],
             'Out',
             no_grad_set=set('Y'),
-            check_dygraph=(not self.use_mkldnn),
-            check_eager=self.check_eager(),
+            check_dygraph=self.check_dygraph(),
         )
 
     def init_input_output(self):
@@ -136,7 +135,8 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
             self.check_output_with_place(
-                place, atol=1e-3, check_dygraph=(not self.use_mkldnn)
+                place,
+                atol=1e-3,
             )
@@ -149,6 +149,7 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
 class TestBF16ElementwiseAddOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_add"
+        self.python_api = paddle.add
         self.dtype = np.uint16
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -170,23 +171,19 @@ class TestBF16ElementwiseAddOp(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)
 
     def test_check_grad_normal(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)
+        self.check_grad_with_place(place, ['X', 'Y'], 'Out')
 
     def test_check_grad_ingore_x(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False
-        )
+        self.check_grad_with_place(place, ['Y'], 'Out', no_grad_set=set("X"))
 
     def test_check_grad_ingore_y(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', no_grad_set=set('Y'))
 
 
 @skip_check_grad_ci(
@@ -248,6 +245,7 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1)
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
 
     def init_axis(self):
         self.axis = 0
@@ -258,6 +256,7 @@ class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1)
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
 
     def init_axis(self):
         self.axis = 0
@@ -268,6 +267,7 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
         self.x = np.random.rand(2, 100, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 100, 1)
+        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
 
     def init_axis(self):
         self.axis = 1
@@ -278,6 +278,7 @@ class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
         self.x = np.random.rand(2, 100, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 100, 1)
+        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
 
     def init_axis(self):
         self.axis = 1
@@ -288,6 +289,7 @@ class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
         self.x = np.random.rand(2, 3, 100).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1, 100)
+        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
 
 
 class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
@@ -295,6 +297,7 @@ class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
         self.x = np.random.rand(2, 3, 100).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1, 100)
+        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
 
 
 class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
@@ -302,6 +305,7 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
         self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
         self.y = np.random.rand(10, 12).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 10, 12, 1)
+        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
 
     def init_axis(self):
         self.axis = 1
@@ -312,6 +316,7 @@ class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
         self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
         self.y = np.random.rand(10, 12).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 10, 12, 1)
+        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
 
     def init_axis(self):
         self.axis = 1
@@ -322,6 +327,7 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
         self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
         self.y = np.random.rand(100, 1).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1, 1)
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1, 1])
 
     def init_axis(self):
         self.axis = 0
@@ -332,6 +338,7 @@ class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
         self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
         self.y = np.random.rand(100, 1).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1, 1)
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1, 1])
 
     def init_axis(self):
         self.axis = 0
@@ -597,6 +604,7 @@ class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
 class TestComplexElementwiseAddOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_add"
+        self.python_api = paddle.add
         self.dtype = np.float64
         self.shape = (2, 3, 4, 5)
         self.init_input_output()
@@ -629,7 +637,7 @@ class TestComplexElementwiseAddOp(OpTest):
         self.grad_y = self.grad_out
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
 
     def test_check_grad_normal(self):
         self.check_grad(
...
```
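The `broadcast_wrapper` closure exists because the static `elementwise_add` op accepts an `axis` attribute that left-aligns a low-rank `Y` against `X`, while `paddle.add` only does standard right-aligned broadcasting. Baking the equivalent reshape into the wrapper makes the dygraph call compute the same thing as the op. A numpy stand-in for the axis=0 case (my illustration, not from the commit):

```python
# numpy stand-in: with axis=0 the op aligns y's 100 entries with x's first
# dimension; reshaping y to (100, 1, 1) reproduces that alignment.
import numpy as np

x = np.random.rand(100, 2, 3)
y = np.random.rand(100)

out = x + y.reshape(100, 1, 1)  # what broadcast_wrapper(shape=[100, 1, 1]) computes
assert out.shape == (100, 2, 3)
```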
elementwise_div op tests:

```diff
@@ -15,13 +15,20 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 from paddle import fluid
 from paddle.fluid import core
 
 
+def broadcast_wrapper(shape=[1, 10, 12, 1]):
+    def div_wrapper(x, y, axis=-1):
+        return paddle.divide(x, y.reshape(shape))
+
+    return div_wrapper
+
+
 class ElementwiseDivOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_div"
@@ -193,6 +200,7 @@ class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
         self.x_shape = [100, 3, 4]
         self.y_shape = [100]
         self.attrs = {'axis': 0}
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
 
     def compute_output(self, x, y):
         return x / y.reshape(100, 1, 1)
@@ -209,6 +217,7 @@ class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
         self.x_shape = [2, 100, 4]
         self.y_shape = [100]
         self.attrs = {'axis': 1}
+        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
 
     def compute_output(self, x, y):
         return x / y.reshape(1, 100, 1)
@@ -224,6 +233,7 @@ class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
     def init_shape(self):
         self.x_shape = [2, 3, 100]
         self.y_shape = [100]
+        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
 
     def compute_output(self, x, y):
         return x / y.reshape(1, 1, 100)
@@ -240,6 +250,7 @@ class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
         self.x_shape = [2, 10, 12, 5]
         self.y_shape = [10, 12]
         self.attrs = {'axis': 1}
+        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
 
     def compute_output(self, x, y):
         return x / y.reshape(1, 10, 12, 1)
@@ -393,7 +404,7 @@ class TestComplexElementwiseDivOp(OpTest):
         self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
 
     def test_check_grad_normal(self):
         self.check_grad(
...
```
elementwise_min op tests:

```diff
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
@@ -25,6 +25,13 @@ from paddle import _legacy_C_ops
 paddle.enable_static()
 
 
+def broadcast_wrapper(shape=[1, 10, 12, 1]):
+    def min_wrapper(x, y, axis=-1):
+        return paddle.minimum(x, y.reshape(shape))
+
+    return min_wrapper
+
+
 class TestElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_min"
@@ -39,16 +46,10 @@ class TestElementwiseOp(OpTest):
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
-        else:
-            self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad_normal(self):
-        if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
-        else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
@@ -118,7 +119,7 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        self.python_api = paddle.minimum
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
        x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
         y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -137,7 +138,7 @@ class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        self.python_api = paddle.minimum
+        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
         x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
         y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -156,7 +157,7 @@ class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        self.python_api = paddle.minimum
+        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
         x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
         y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -174,7 +175,7 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        self.python_api = paddle.minimum
+        self.python_api = broadcast_wrapper(shape=[1, 25, 4, 1])
         x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
         y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
...
```
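One detail worth noting in the min tests above: `y` is built as `x[...] + sgn * uniform(1, 2)`, which pushes every `y` value outside the (0.5, 1) range that `x` is drawn from. That guarantees no element of `x` ties with the broadcast `y`, keeping the minimum's gradient unambiguous. A numpy sketch of the invariant (an illustration, not test code from the commit):

```python
# numpy sketch: x is drawn from (0.5, 1); y = x[:, 0, 0] +/- (1, 2) lands in
# (-1.5, 0) or (1.5, 3), so x and the broadcast y never tie elementwise.
import numpy as np

x = np.random.uniform(0.5, 1, (100, 3, 2))
sgn = np.random.choice([-1, 1], (100,))
y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,))

assert not np.isclose(x, y.reshape(100, 1, 1)).any()
```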
elementwise_mul op tests:

```diff
@@ -15,14 +15,24 @@
 import unittest
 import numpy as np
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import (
-    OpTest,
-    convert_float_to_uint16,
-    skip_check_grad_ci,
-)
+
+
+def mul(x, y, axis=-1, use_mkldnn=False):
+    return x * y
+
+
+setattr(paddle, "mul", mul)
+
+
+def broadcast_wrapper(shape=[1, 10, 12, 1]):
+    def mul_wrapper(x, y, axis=-1):
+        return x * y.reshape(shape)
+
+    return mul_wrapper
 
 
 class ElementwiseMulOp(OpTest):
@@ -31,6 +41,7 @@ class ElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.dtype = np.float64
         self.axis = -1
         self.init_dtype()
@@ -107,6 +118,7 @@ class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
 class TestBF16ElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.dtype = np.uint16
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -145,6 +157,7 @@ class TestBF16ElementwiseMulOp(OpTest):
 class TestElementwiseMulOp_scalar(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(10, 3, 4).astype(np.float64),
             'Y': np.random.rand(1).astype(np.float64),
@@ -156,6 +169,7 @@ class TestElementwiseMulOp_scalar(ElementwiseMulOp):
 class TestElementwiseMulOp_Vector(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.random((100,)).astype("float64"),
             'Y': np.random.random((100,)).astype("float64"),
@@ -168,6 +182,7 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
     def init_input_output(self):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
+        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
         self.out = self.x * self.y.reshape(100, 1, 1)
 
     def init_axis(self):
@@ -177,6 +192,7 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
 class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
         self.inputs = {
             'X': np.random.rand(2, 100, 3).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -192,6 +208,7 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
 class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -206,6 +223,7 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
 class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
         self.inputs = {
             'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
             'Y': np.random.rand(10, 12).astype(np.float64),
@@ -221,6 +239,7 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
 class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(10, 2, 11).astype(np.float64),
             'Y': np.random.rand(10, 1, 11).astype(np.float64),
@@ -232,6 +251,7 @@ class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
 class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(10, 4, 2, 3).astype(np.float64),
             'Y': np.random.rand(10, 4, 1, 3).astype(np.float64),
@@ -251,6 +271,7 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(1, 1, 100).astype(np.float64),
@@ -262,6 +283,7 @@ class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
 class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(30, 3, 1, 5).astype(np.float64),
             'Y': np.random.rand(30, 1, 4, 1).astype(np.float64),
@@ -273,6 +295,7 @@ class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
 class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.inputs = {
             'X': np.random.rand(10, 10).astype(np.float64),
             'Y': np.random.rand(2, 2, 10, 10).astype(np.float64),
@@ -289,6 +312,7 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
 class TestComplexElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
+        self.python_api = paddle.mul
         self.init_base_dtype()
         self.init_input_output()
         self.init_grad_input_output()
...
```
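Unlike add/div/min, there is no public `paddle.mul` for the test to point at, so the diff above defines a plain-Python `mul` and attaches it with `setattr(paddle, "mul", mul)`; the extra `axis`/`use_mkldnn` parameters absorb op attributes the harness may forward. The same pattern in isolation (an illustrative sketch, not additional code from the commit):

```python
# Sketch of the fallback-registration pattern: define a Python equivalent of
# the op and attach it so `self.python_api = paddle.mul` resolves.
import paddle


def mul(x, y, axis=-1, use_mkldnn=False):
    # axis/use_mkldnn mirror op attributes and are ignored by the math
    return x * y


setattr(paddle, "mul", mul)

x = paddle.to_tensor([1.0, 2.0])
y = paddle.to_tensor([3.0, 4.0])
print(paddle.mul(x, y))  # [3., 8.]
```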
elementwise_sub op tests:

```diff
@@ -15,15 +15,26 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
 
 
+def sub_wrapper(shape=None):
+    def inner_wrapper(x, y, axis=-1):
+        if shape is None:
+            return x - y
+        else:
+            return x - y.reshape(shape)
+
+    return inner_wrapper
+
+
 class TestElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
@@ -50,6 +61,7 @@ class TestElementwiseOp(OpTest):
 class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.uniform(0.1, 1, []).astype("float64"),
             'Y': np.random.uniform(0.1, 1, []).astype("float64"),
@@ -60,6 +72,7 @@ class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
 class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, []).astype("float64"),
@@ -70,6 +83,7 @@ class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
 class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.uniform(0.1, 1, []).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
@@ -80,6 +94,7 @@ class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
 class TestBF16ElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.dtype = np.uint16
         x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
         y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -110,6 +125,7 @@ class TestBF16ElementwiseOp(OpTest):
 class TestElementwiseSubOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.rand(10, 3, 4).astype(np.float64),
             'Y': np.random.rand(1).astype(np.float64),
@@ -120,6 +136,7 @@ class TestElementwiseSubOp_scalar(TestElementwiseOp):
 class TestElementwiseSubOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.random((100,)).astype("float64"),
             'Y': np.random.random((100,)).astype("float64"),
@@ -130,6 +147,7 @@ class TestElementwiseSubOp_Vector(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper(shape=[100, 1, 1])
         self.inputs = {
             'X': np.random.rand(100, 3, 2).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -144,6 +162,7 @@ class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper(shape=[1, 100, 1])
         self.inputs = {
             'X': np.random.rand(2, 100, 3).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -158,6 +177,7 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper(shape=[1, 1, 100])
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -171,6 +191,7 @@ class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper(shape=[1, 10, 12, 1])
         self.inputs = {
             'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
             'Y': np.random.rand(10, 12).astype(np.float64),
@@ -185,6 +206,7 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.rand(2, 5, 3, 12).astype(np.float64),
             'Y': np.random.rand(2, 5, 1, 12).astype(np.float64),
@@ -195,6 +217,7 @@ class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
 class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(1, 1, 100).astype(np.float64),
@@ -205,6 +228,7 @@ class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
 class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.inputs = {
             'X': np.random.rand(10, 3, 1, 4).astype(np.float64),
             'Y': np.random.rand(10, 1, 12, 1).astype(np.float64),
@@ -215,6 +239,11 @@ class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
 class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
+
+        def sub_func(x, y, axis=2):
+            return x.reshape([1, 1, 10, 12]) - y
+
+        self.python_api = sub_func
         self.inputs = {
             'X': np.random.rand(10, 12).astype(np.float64),
             'Y': np.random.rand(2, 3, 10, 12).astype(np.float64),
@@ -230,6 +259,7 @@ class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
 class TestComplexElementwiseSubOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
+        self.python_api = sub_wrapper()
         self.dtype = np.float64
         self.shape = (2, 3, 4, 5)
         self.init_input_output()
...
```
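The sub tests fold both cases into one factory: `sub_wrapper()` with no argument returns a plain `x - y` for same-shape and right-aligned broadcast inputs, while `sub_wrapper(shape=...)` emulates the op's `axis` alignment. A quick numpy demonstration (illustrative only):

```python
# numpy demonstration of the two sub_wrapper modes.
import numpy as np


def sub_wrapper(shape=None):
    def inner_wrapper(x, y, axis=-1):
        if shape is None:
            return x - y
        return x - y.reshape(shape)

    return inner_wrapper


plain = sub_wrapper()                     # same-shape / right-aligned cases
aligned = sub_wrapper(shape=[100, 1, 1])  # emulates the op's axis=0 case

x = np.random.rand(100, 3, 2)
y = np.random.rand(100)
assert aligned(x, y).shape == (100, 3, 2)
assert plain(x, x).shape == x.shape
```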
pixel_shuffle op tests:

```diff
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -85,10 +85,13 @@ class TestPixelShuffleOp(OpTest):
         self.format = "NCHW"
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(
+            ['X'],
+            'Out',
+        )
 
 
 class TestChannelLast(TestPixelShuffleOp):
...
```
poisson op tests:

```diff
@@ -16,7 +16,7 @@ import math
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -41,6 +41,7 @@ def output_hist(out, lam, a, b):
 class TestPoissonOp1(OpTest):
     def setUp(self):
         self.op_type = "poisson"
+        self.python_api = paddle.tensor.poisson
         self.config()
 
         self.attrs = {}
...
```
put_along_axis op tests:

```diff
@@ -16,7 +16,7 @@ import copy
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.framework import core
@@ -30,6 +30,7 @@ class TestPutAlongAxisOp(OpTest):
         self.reduce_op = "assign"
         self.dtype = 'float64'
         self.op_type = "put_along_axis"
+        self.python_api = paddle.tensor.put_along_axis
         self.xnp = np.random.random(self.x_shape).astype(self.x_type)
         # numpy put_along_axis is an inplace opearion.
         self.xnp_result = copy.deepcopy(self.xnp)
...
```
size op tests:

```diff
@@ -15,15 +15,20 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 
 
+def size_wrapper(input):
+    return paddle.numel(paddle.to_tensor(input))
+
+
 class TestSizeOp(OpTest):
     def setUp(self):
         self.op_type = "size"
+        self.python_api = size_wrapper
         self.shape = []
         self.config()
         input = np.zeros(self.shape, dtype='bool')
...
```
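`size_wrapper` maps the op's raw input onto `paddle.numel`, which returns the element count as a tensor, matching the `size` op's output. For instance (illustrative, not test code):

```python
# paddle.numel returns the total element count as a tensor.
import numpy as np
import paddle

arr = np.zeros([2, 3, 4], dtype='float32')
print(int(paddle.numel(paddle.to_tensor(arr))))  # 24
```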
softmax op tests:

```diff
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
@@ -43,6 +43,12 @@ def ref_softmax(x, axis=None, dtype=None):
     return np.apply_along_axis(stable_softmax, axis, x_t)
 
 
+def softmax_wrapper(
+    x, axis=-1, dtype=None, name=None, use_cudnn=False, use_mkldnn=False
+):
+    return paddle.nn.functional.softmax(x, axis=axis, dtype=dtype)
+
+
 class TestSoftmaxOp(OpTest):
     def get_x_shape(self):
         return [10, 10]
@@ -52,6 +58,7 @@ class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "softmax"
+        self.python_api = softmax_wrapper
         self.use_cudnn = False
         self.use_mkldnn = False
         # explicilty use float32 for ROCm, as MIOpen does not yet support float64
@@ -109,6 +116,7 @@ class TestSoftmaxOp(OpTest):
 class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
     def setUp(self):
         self.op_type = "softmax"
+        self.python_api = softmax_wrapper
         self.use_cudnn = False
         self.use_mkldnn = False
         # explicilty use float32 for ROCm, as MIOpen does not yet support float64
@@ -133,6 +141,7 @@ class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
 class TestSoftmaxOp_ZeroDim2(TestSoftmaxOp):
     def setUp(self):
         self.op_type = "softmax"
+        self.python_api = softmax_wrapper
         self.use_cudnn = True
         self.use_mkldnn = False
         # explicilty use float32 for ROCm, as MIOpen does not yet support float64
@@ -366,6 +375,7 @@ class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
 class TestSoftmaxBF16Op(OpTest):
     def setUp(self):
         self.op_type = "softmax"
+        self.python_api = softmax_wrapper
         self.use_cudnn = self.init_cudnn()
         self.use_mkldnn = False
         self.dtype = np.uint16
...
```
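`softmax_wrapper` exists because the `softmax` op carries `use_cudnn`/`use_mkldnn` attributes that `paddle.nn.functional.softmax` does not accept; the wrapper takes them in its signature and drops them. Behavior sketch (illustrative):

```python
# The wrapper accepts the op's backend flags but forwards only the
# arguments the functional API understands.
import paddle
import paddle.nn.functional as F


def softmax_wrapper(
    x, axis=-1, dtype=None, name=None, use_cudnn=False, use_mkldnn=False
):
    return F.softmax(x, axis=axis, dtype=dtype)


x = paddle.rand([4, 10])
y = softmax_wrapper(x, axis=1, use_cudnn=True)  # flag is simply ignored
print(y.sum(axis=1))  # each row sums to ~1.0
```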
spectral_norm op tests:

```diff
@@ -15,9 +15,10 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
+from paddle import _C_ops
 from paddle.fluid.framework import Program, program_guard
 
 paddle.enable_static()
@@ -47,6 +48,10 @@ def spectral_norm(weight, u, v, dim, power_iters, eps):
     return weight / sigma
 
 
+def spectral_norm_wrapper(weight, u, v, dim, power_iters, eps):
+    return _C_ops.spectral_norm(weight, u, v, dim, power_iters, eps)
+
+
 @skip_check_grad_ci(
     reason="Spectral norm do not check grad when power_iters > 0 "
     "because grad is not calculated in power iterations, "
@@ -56,6 +61,7 @@ class TestSpectralNormOpNoGrad(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'spectral_norm'
+        self.python_api = spectral_norm_wrapper
         weight = np.random.random(self.weight_shape).astype('float64')
         u = np.random.normal(0.0, 1.0, self.u_shape).astype('float64')
         v = np.random.normal(0.0, 1.0, self.v_shape).astype('float64')
...
```
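For `spectral_norm` there is no public functional API matching the op's `(weight, u, v, dim, power_iters, eps)` signature; the public surface is the `paddle.nn.SpectralNorm` layer, which, as far as I can tell, manages `u` and `v` internally. The wrapper therefore binds the generated kernel entry point directly:

```python
# A thin pass-through to the generated kernel binding; the parameter order
# mirrors the op's inputs and attributes exactly (same code as the diff).
from paddle import _C_ops


def spectral_norm_wrapper(weight, u, v, dim, power_iters, eps):
    return _C_ops.spectral_norm(weight, u, v, dim, power_iters, eps)
```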
split op tests:

```diff
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
@@ -24,6 +24,8 @@ from paddle.fluid import Program, core, program_guard
 class TestSplitOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.split
+        self.python_out_sig = ['out0', 'out1', 'out2']
         self._set_op_type()
         self.dtype = self.get_dtype()
         axis = 1
@@ -62,6 +64,8 @@ class TestSplitOp(OpTest):
 # test with attr(num)
 class TestSplitOp_2(OpTest):
     def setUp(self):
+        self.python_api = paddle.split
+        self.python_out_sig = ['out0', 'out1', 'out2']
         self._set_op_type()
         self.dtype = self.get_dtype()
         self.init_data()
@@ -98,6 +102,8 @@ class TestSplitOp_2(OpTest):
 # attr(axis) is Tensor
 class TestSplitOp_AxisTensor(OpTest):
     def setUp(self):
+        self.python_api = paddle.split
+        self.python_out_sig = ['out0', 'out1', 'out2']
         self._set_op_type()
         self.dtype = self.get_dtype()
         self.init_data()
@@ -133,6 +139,8 @@ class TestSplitOp_AxisTensor(OpTest):
 # attr(sections) is list containing Tensor
 class TestSplitOp_SectionsTensor(OpTest):
     def setUp(self):
+        self.python_api = paddle.split
+        self.python_out_sig = ['out0', 'out1', 'out2']
         self._set_op_type()
         self.dtype = self.get_dtype()
         self.init_data()
@@ -178,6 +186,8 @@ class TestSplitOp_SectionsTensor(OpTest):
 class TestSplitOp_unk_section(OpTest):
     def setUp(self):
+        self.python_api = paddle.split
+        self.python_out_sig = ['out0', 'out1', 'out2']
         self._set_op_type()
         self.dtype = self.get_dtype()
         self.init_data()
...
```
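`split` is the one multi-output op in this commit, which is why its tests set `python_out_sig` in addition to `python_api`: `paddle.split` returns a list of tensors, and the signature tells the harness which named op outputs ('out0', 'out1', 'out2') the list elements correspond to. Usage of the underlying API (illustrative):

```python
# paddle.split returns a list of tensors; python_out_sig names them so the
# harness can match them against the op's outputs.
import paddle

x = paddle.rand([4, 6, 6])
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
print(out0.shape)  # [4, 2, 6]
```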
sum op tests:

```diff
@@ -19,6 +19,11 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+)
 
 import paddle
 import paddle.fluid as fluid
@@ -26,16 +31,19 @@ import paddle.fluid.core as core
 import paddle.inference as paddle_infer
 from paddle import enable_static
 from paddle.fluid.op import Operator
-from paddle.fluid.tests.unittests.op_test import (
-    OpTest,
-    convert_float_to_uint16,
-    convert_uint16_to_float,
-)
+
+
+def sum_wrapper(X, use_mkldnn=False):
+    res = 0
+    for x in X:
+        res += x
+    return res
 
 
 class TestSumOp(OpTest):
     def setUp(self):
         self.op_type = "sum"
+        self.python_api = sum_wrapper
         self.init_kernel_type()
         self.use_mkldnn = False
         self.init_kernel_type()
@@ -341,10 +349,14 @@ class TestSumBF16Op(OpTest):
         self.dtype = np.uint16
 
     def test_check_output(self):
-        self.check_output()
+        # new dynamic graph mode does not support unit16 type
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out', numeric_grad_delta=0.5)
+        # new dynamic graph mode does not support unit16 type
+        self.check_grad(
+            ['x0'], 'Out', numeric_grad_delta=0.5, check_dygraph=False
+        )
 
 
 class API_Test_Add_n(unittest.TestCase):
...
```
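Two points from the sum diff: the op's `X` input is variadic, so `sum_wrapper` receives a sequence of tensors and folds them with `+`; and the BF16 tests pass `check_dygraph=False` because, per the in-diff comment, the new dynamic graph mode does not support the uint16 (bfloat16 storage) dtype. The wrapper in isolation (illustrative):

```python
# sum_wrapper folds a variadic list of tensors, mirroring the sum op's
# list-valued X input.
import paddle


def sum_wrapper(X, use_mkldnn=False):
    res = 0
    for x in X:
        res += x
    return res


xs = [paddle.to_tensor([1.0, 2.0]), paddle.to_tensor([3.0, 4.0])]
print(sum_wrapper(xs))  # [4., 6.]
```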
take_along_axis op tests:

```diff
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.framework import core
@@ -27,6 +27,7 @@ class TestTakeAlongAxisOp(OpTest):
     def setUp(self):
         self.init_data()
         self.op_type = "take_along_axis"
+        self.python_api = paddle.tensor.take_along_axis
         self.xnp = np.random.random(self.x_shape).astype(self.x_type)
         self.target = np.take_along_axis(self.xnp, self.index, self.axis)
         broadcast_shape_list = list(self.x_shape)
...
```