Unverified · Commit b8713309 · Authored by Charles-hit, committed by GitHub

Add prim test for elementwise ops (#50807)

* fix prim_op_test when the Python API outputs differ from the kernel signature

* add elementwise op prim test

* fix unit test

* add bfloat16 support for full in the static prim api

* empty-commit

* close bf16 test

* polish elementwise tests
Parent 98ae15c0
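At a glance, the pattern this commit rolls out across the elementwise tests is: declare the op under test as a prim op (`prim_op_type = "prim"`), then ask the checker to also verify the composite (decomposed) gradient via `check_prim`. Below is a minimal standalone sketch of that pattern — a hypothetical test, not code from the diff, and the import path and checker behavior are assumptions based on how these test files are written:

```python
import numpy as np
import paddle
from op_test import OpTest  # import path as used inside Paddle's unittests dir (assumption)


class TestElementwiseAddPrim(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add      # 2.0 API used for dygraph checking
        self.prim_op_type = "prim"        # enable prim (composite) checking
        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float64)
        y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float64)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_grad(self):
        # check_prim=True additionally compares the decomposed backward
        # against the handwritten kernel gradient
        self.check_grad(['X', 'Y'], 'Out', check_prim=True)
```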
@@ -71,6 +71,9 @@ Tensor full<DescTensor>(const IntArray& shape,
     case phi::DataType::FLOAT16:
       op->SetAttr("str_value", std::to_string(value.to<float>()));
       break;
+    case phi::DataType::BFLOAT16:
+      op->SetAttr("str_value", std::to_string(value.to<float>()));
+      break;
     case phi::DataType::FLOAT32:
       op->SetAttr("value", value.to<float>());
       break;
@@ -107,7 +110,8 @@ Tensor full<DescTensor>(const IntArray& shape,
     default:
       PADDLE_THROW(phi::errors::Unimplemented(
           "We support "
-          "bool/float16/float32/float64/int8/int16/int32/int64/uint8/uint16/"
+          "bool/float16/bfloat16/float32/float64/int8/int16/int32/int64/uint8/"
+          "uint16/"
          "uint32/uint64 for full, but we got data type: %s",
          phi::DataTypeToString(dtype)));
   }
......
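The new `BFLOAT16` case serializes the fill value through a float string, mirroring the existing `FLOAT16` branch. On the Python side (as in the tests below), bf16 tensors are carried as `np.uint16` bit patterns, because bfloat16 is the upper half of a float32. A minimal sketch of that representation — `float32_to_bfloat16_bits` is my own illustrative helper; it truncates, while the `convert_float_to_uint16` helper the tests use may also round:

```python
import numpy as np

def float32_to_bfloat16_bits(x):
    # bfloat16 keeps the upper 16 bits of the float32 encoding, which is why
    # the bf16 tests declare self.dtype = np.uint16 and convert inputs with
    # convert_float_to_uint16.
    x = np.asarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)

assert float32_to_bfloat16_bits(np.float32(1.0)) == 0x3F80  # bf16 bits of 1.0
```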
@@ -486,10 +486,6 @@ if(NOT WITH_GPU
   list(REMOVE_ITEM TEST_OPS test_build_strategy_fusion_group_pass)
 endif()
 
-if(NOT WITH_FLASHATTN)
-  list(REMOVE_ITEM TEST_OPS test_flash_attention)
-endif()
-
 # Some ops need to check results when gc is enabled
 # Currently, only ops that register NoNeedBufferVarsInference need to do this test
 set(TEST_OPS_WITH_GC
@@ -1215,7 +1211,11 @@ set(TEST_CINN_OPS
     test_full_like_op
     test_fill_any_like_op
     test_concat_op
-    test_top_k_v2_op)
+    test_top_k_v2_op
+    test_elementwise_add_op
+    test_elementwise_sub_op
+    test_elementwise_div_op
+    test_elementwise_mul_op)
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
......
@@ -22,13 +22,6 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 
 
-def broadcast_wrapper(shape=[1, 10, 12, 1]):
-    def add_wrapper(x, y, axis=-1):
-        return x + y.reshape(shape)
-
-    return add_wrapper
-
-
 class TestElementwiseAddOp(OpTest):
     def init_kernel_type(self):
         self.use_mkldnn = False
@@ -36,10 +29,14 @@ class TestElementwiseAddOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_add"
         self.python_api = paddle.add
+        self.prim_op_type = "prim"
         self.init_dtype()
         self.init_input_output()
         self.init_kernel_type()
         self.init_axis()
+        self.only_prim()
+        self.if_check_prim()
+        self.if_skip_cinn()
 
         self.inputs = {
             'X': OpTest.np_dtype_to_fluid_dtype(self.x),
@@ -54,7 +51,7 @@ class TestElementwiseAddOp(OpTest):
     def test_check_output(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
         self.check_output(
-            check_dygraph=self.check_dygraph(),
+            check_dygraph=self.check_dygraph(), check_prim=self.check_prim
         )
 
     def test_check_grad_normal(self):
@@ -65,6 +62,7 @@ class TestElementwiseAddOp(OpTest):
             ['X', 'Y'],
             'Out',
             check_dygraph=self.check_dygraph(),
+            check_prim=self.check_prim,
         )
 
     def test_check_grad_ingore_x(self):
@@ -76,6 +74,7 @@ class TestElementwiseAddOp(OpTest):
             'Out',
             no_grad_set=set("X"),
             check_dygraph=self.check_dygraph(),
+            check_prim=self.check_prim,
         )
 
     def test_check_grad_ingore_y(self):
@@ -87,6 +86,7 @@ class TestElementwiseAddOp(OpTest):
             'Out',
             no_grad_set=set('Y'),
             check_dygraph=self.check_dygraph(),
+            check_prim=self.check_prim,
         )
 
     def init_input_output(self):
@@ -100,6 +100,15 @@ class TestElementwiseAddOp(OpTest):
     def init_axis(self):
         self.axis = -1
 
+    def only_prim(self):
+        pass
+
+    def if_check_prim(self):
+        self.check_prim = self.axis == -1
+
+    def if_skip_cinn(self):
+        pass
+
 
 class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
     def init_input_output(self):
@@ -107,15 +116,18 @@ class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.add(self.x, self.y)
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
-class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp):
+class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp_ZeroDim1):
     def init_input_output(self):
         self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
         self.out = np.add(self.x, self.y)
 
 
-class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp):
+class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp_ZeroDim1):
     def init_input_output(self):
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
@@ -137,8 +149,45 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
             self.check_output_with_place(
                 place,
                 atol=1e-3,
+                check_dygraph=self.check_dygraph(),
+                check_prim=self.check_prim,
             )
 
+    def test_check_grad_normal(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place,
+                ['X', 'Y'],
+                'Out',
+                check_dygraph=self.check_dygraph(),
+                check_prim=self.check_prim,
+            )
+
+    def test_check_grad_ingore_x(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place,
+                ['Y'],
+                'Out',
+                no_grad_set=set("X"),
+                check_dygraph=self.check_dygraph(),
+                check_prim=self.check_prim,
+            )
+
+    def test_check_grad_ingore_y(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place,
+                ['X'],
+                'Out',
+                no_grad_set=set('Y'),
+                check_dygraph=self.check_dygraph(),
+                check_prim=self.check_prim,
+            )
+
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
@@ -150,6 +199,7 @@ class TestBF16ElementwiseAddOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_add"
         self.python_api = paddle.add
+        self.prim_op_type = "prim"
         self.dtype = np.uint16
 
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -168,6 +218,7 @@ class TestBF16ElementwiseAddOp(OpTest):
         }
         self.attrs = {'axis': self.axis, 'use_mkldnn': False}
         self.outputs = {'Out': convert_float_to_uint16(self.out)}
+        self.if_skip_cinn()
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
@@ -175,15 +226,22 @@ class TestBF16ElementwiseAddOp(OpTest):
     def test_check_grad_normal(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X', 'Y'], 'Out')
+        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_prim=True)
 
     def test_check_grad_ingore_x(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad_with_place(
+            place, ['Y'], 'Out', no_grad_set=set("X"), check_prim=True
+        )
 
     def test_check_grad_ingore_y(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad_with_place(
+            place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
+        )
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 @skip_check_grad_ci(
@@ -205,6 +263,9 @@ class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
         self.y = np.random.rand(1).astype(self.dtype)
         self.out = self.x + self.y
 
+    def only_prim(self):
+        self.only_prim = True
+
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1,1) to test broadcast."
@@ -245,40 +306,67 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1)
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 0
 
+    def if_check_prim(self):
+        self.check_prim = False
+
 
+@skip_check_grad_ci(
+    reason="the numerical method is not accurate enough on fp16"
+)
 class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
     def init_input_output(self):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1)
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 0
 
+    # The paddle 2.0 API has no axis parameter in add,
+    # so we can't check prim when axis is not -1 by default.
+    def if_check_prim(self):
+        self.check_prim = self.axis == -1
+
+    # Because the numerical method is not accurate enough on fp16,
+    # we do not test the grad on fp16
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ingore_x(self):
+        pass
+
+    def test_check_grad_ingore_y(self):
+        pass
+
 
 class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
     def init_input_output(self):
         self.x = np.random.rand(2, 100, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 100, 1)
-        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 1
 
+    def if_check_prim(self):
+        self.check_prim = False
+
 
-class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_1(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(2, 100, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 100, 1)
-        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 1
@@ -289,15 +377,20 @@ class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
         self.x = np.random.rand(2, 3, 100).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1, 100)
-        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
+        self.python_api = paddle.add
 
 
-class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_2(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(2, 3, 100).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1, 100)
-        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
+        self.python_api = paddle.add
+
+    def init_axis(self):
+        self.axis = -1
 
 
 class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
@@ -305,18 +398,20 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
         self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
         self.y = np.random.rand(10, 12).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 10, 12, 1)
-        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 1
 
 
-class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_3(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
         self.y = np.random.rand(10, 12).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 10, 12, 1)
-        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 1
@@ -327,18 +422,20 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
         self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
         self.y = np.random.rand(100, 1).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1, 1)
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 0
 
 
-class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_4(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
         self.y = np.random.rand(100, 1).astype(self.dtype)
         self.out = self.x + self.y.reshape(100, 1, 1, 1)
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1, 1])
+        self.python_api = paddle.add
 
     def init_axis(self):
         self.axis = 0
@@ -351,7 +448,9 @@ class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
         self.out = self.x + self.y
 
 
-class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_5(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(10, 3, 12).astype(self.dtype)
         self.y = np.random.rand(10, 1, 12).astype(self.dtype)
@@ -372,7 +471,9 @@ class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
         self.out = self.x + self.y
 
 
-class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
+class TestFP16ElementwiseAddOp_broadcast_6(
+    TestFP16ElementwiseAddOp_broadcast_0
+):
     def init_input_output(self):
         self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
         self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
@@ -389,6 +490,9 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
         self.axis = 1
 
 
+@skip_check_grad_ci(
+    reason="the numerical method is not accurate enough on fp16."
+)
 class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
     def init_input_output(self):
         self.x = np.random.rand(2, 10, 12).astype(self.dtype)
@@ -398,32 +502,31 @@ class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
     def init_axis(self):
         self.axis = 1
 
+    # Because the numerical method is not accurate enough on fp16,
+    # we do not test the grad on fp16
+    def test_check_grad_normal(self):
+        pass
+
+    def test_check_grad_ingore_x(self):
+        pass
+
+    def test_check_grad_ingore_y(self):
+        pass
+
 
+@skip_check_grad_ci(
+    reason="[skip shape check] Use y_shape(1) to test broadcast."
+)
 class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
     def init_input_output(self):
         self.x = np.random.rand(100, 1).astype(self.dtype)
         self.y = np.random.rand(1).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1)
 
-    def init_axis(self):
-        self.axis = 1
-
 
+@skip_check_grad_ci(
+    reason="[skip shape check] Use y_shape(1) to test broadcast."
+)
 class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
     def init_input_output(self):
         self.x = np.random.rand(100, 1).astype(self.dtype)
         self.y = np.random.rand(1).astype(self.dtype)
         self.out = self.x + self.y.reshape(1, 1)
 
-    def init_axis(self):
-        self.axis = 1
-
 
 class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
     def init_input_output(self):
......
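Why `broadcast_wrapper` disappears and `check_prim` is forced off for `axis != -1`: the 2.0 API `paddle.add` has no `axis` parameter, so mid-axis broadcasting can only be expressed by reshaping `y` explicitly. A short illustration, assuming an installed Paddle and using the shapes from the broadcast_0 case above:

```python
import paddle

x = paddle.rand([100, 2, 3])
y = paddle.rand([100])
# Equivalent of the legacy elementwise_add with axis=0: align y with the
# leading dimension by reshaping, since paddle.add only broadcasts by
# numpy-style trailing-dimension rules.
out = paddle.add(x, y.reshape([100, 1, 1]))
print(out.shape)  # [100, 2, 3]
```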
@@ -33,9 +33,12 @@ class ElementwiseDivOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_div"
         self.python_api = paddle.divide
+        self.prim_op_type = "prim"
         self.init_args()
         self.init_dtype()
         self.init_shape()
+        self.if_check_prim()
+        self.if_skip_cinn()
 
         x = self.gen_data(self.x_shape).astype(self.val_dtype)
         y = self.gen_data(self.y_shape).astype(self.val_dtype)
@@ -61,6 +64,9 @@ class ElementwiseDivOp(OpTest):
         self.grad_x = grad_x
         self.grad_y = grad_y
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
     def init_args(self):
         self.check_dygraph = True
         self.place = None
@@ -73,6 +79,9 @@ class ElementwiseDivOp(OpTest):
         self.x_shape = [13, 17]
         self.y_shape = [13, 17]
 
+    def if_check_prim(self):
+        self.check_prim = True
+
     def gen_data(self, shape):
         return np.random.uniform(0.1, 1, shape)
 
@@ -113,6 +122,7 @@ class ElementwiseDivOp(OpTest):
                 'user_defined_grads': check_option['val_grad'],
                 'user_defined_grad_outputs': [self.grad_out],
                 'check_dygraph': self.check_dygraph,
+                'check_prim': self.check_prim,
             }
             if self.place is None:
                 self.check_grad(*check_args, **check_kwargs)
@@ -121,6 +131,15 @@ class ElementwiseDivOp(OpTest):
                 self.check_grad_with_place(*check_args, **check_kwargs)
 
 
+class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
+    def init_dtype(self):
+        self.dtype = np.float32
+        self.val_dtype = np.float32
+
+    def if_skip_cinn(self):
+        pass
+
+
 class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
     def init_shape(self):
         self.x_shape = []
@@ -176,6 +195,10 @@ class TestElementwiseDivOpBF16(ElementwiseDivOp):
         self.x_shape = [12, 13]
         self.y_shape = [12, 13]
 
+    # elementwise_pow doesn't support bfloat16
+    def if_check_prim(self):
+        self.check_prim = False
+
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
@@ -195,7 +218,38 @@ class TestElementwiseDivOpVector(ElementwiseDivOp):
         self.y_shape = [100]
 
 
-class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
+class TestElementwiseDivOpNoPrim(ElementwiseDivOp):
+    def test_check_gradient(self):
+        check_list = []
+        check_list.append(
+            {
+                'grad': ['X', 'Y'],
+                'no_grad': None,
+                'val_grad': [self.grad_x, self.grad_y],
+            }
+        )
+        check_list.append(
+            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
+        )
+        check_list.append(
+            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
+        )
+        for check_option in check_list:
+            check_args = [check_option['grad'], 'Out']
+            check_kwargs = {
+                'no_grad_set': check_option['no_grad'],
+                'user_defined_grads': check_option['val_grad'],
+                'user_defined_grad_outputs': [self.grad_out],
+                'check_dygraph': self.check_dygraph,
+            }
+            if self.place is None:
+                self.check_grad(*check_args, **check_kwargs)
+            else:
+                check_args.insert(0, self.place)
+                self.check_grad_with_place(*check_args, **check_kwargs)
+
+
+class TestElementwiseDivOpBroadcast0(TestElementwiseDivOpNoPrim):
     def init_shape(self):
         self.x_shape = [100, 3, 4]
         self.y_shape = [100]
@@ -212,7 +266,7 @@ class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
         return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))
 
 
-class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
+class TestElementwiseDivOpBroadcast1(TestElementwiseDivOpNoPrim):
     def init_shape(self):
         self.x_shape = [2, 100, 4]
         self.y_shape = [100]
@@ -229,7 +283,7 @@ class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
         return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))
 
 
-class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
+class TestElementwiseDivOpBroadcast2(TestElementwiseDivOpNoPrim):
     def init_shape(self):
         self.x_shape = [2, 3, 100]
         self.y_shape = [100]
@@ -245,7 +299,7 @@ class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
         return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))
 
 
-class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
+class TestElementwiseDivOpBroadcast3(TestElementwiseDivOpNoPrim):
     def init_shape(self):
         self.x_shape = [2, 10, 12, 5]
         self.y_shape = [10, 12]
@@ -313,7 +367,7 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
         return np.sum(grad_out / y, axis=(0, 1))
 
 
-class TestElementwiseDivOpInt(ElementwiseDivOp):
+class TestElementwiseDivOpInt(TestElementwiseDivOpNoPrim):
     def init_dtype(self):
         self.dtype = np.int32
         self.val_dtype = np.int32
@@ -333,6 +387,9 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
         self.dtype = np.float16
         self.val_dtype = np.float16
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseDivBroadcast(unittest.TestCase):
     def test_shape_with_batch_sizes(self):
......
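The division tests feed analytic gradients through `user_defined_grads`; for equal shapes, the expressions visible above reduce to `grad_x = grad_out / y` and `grad_y = -grad_out * out / y`. A quick numpy check of that algebra:

```python
import numpy as np

x = np.random.uniform(0.1, 1, [13, 17])
y = np.random.uniform(0.1, 1, [13, 17])
out = x / y
grad_out = np.ones_like(out)
grad_x = grad_out / y                 # d(x/y)/dx = 1/y
grad_y = -grad_out * out / y          # d(x/y)/dy = -x/y**2
np.testing.assert_allclose(grad_y, -grad_out * x / y**2)
```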
@@ -21,33 +21,21 @@ import paddle
 import paddle.fluid.core as core
 
 
-def mul(x, y, axis=-1, use_mkldnn=False):
-    return x * y
-
-
-setattr(paddle, "mul", mul)
-
-
-def broadcast_wrapper(shape=[1, 10, 12, 1]):
-    def mul_wrapper(x, y, axis=-1):
-        return x * y.reshape(shape)
-
-    return mul_wrapper
-
-
 class ElementwiseMulOp(OpTest):
     def init_kernel_type(self):
         self.use_mkldnn = False
 
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.dtype = np.float64
         self.axis = -1
         self.init_dtype()
         self.init_input_output()
         self.init_kernel_type()
         self.init_axis()
+        self.if_skip_cinn()
 
         self.inputs = {
             'X': OpTest.np_dtype_to_fluid_dtype(self.x),
@@ -62,7 +50,12 @@ class ElementwiseMulOp(OpTest):
     def test_check_grad_normal(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
-        self.check_grad(['X', 'Y'], 'Out', check_dygraph=(not self.use_mkldnn))
+        self.check_grad(
+            ['X', 'Y'],
+            'Out',
+            check_dygraph=(not self.use_mkldnn),
+            check_prim=True,
+        )
 
     def test_check_grad_ingore_x(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -71,6 +64,7 @@ class ElementwiseMulOp(OpTest):
             'Out',
             no_grad_set=set("X"),
             check_dygraph=(not self.use_mkldnn),
+            check_prim=True,
         )
 
     def test_check_grad_ingore_y(self):
@@ -80,6 +74,7 @@ class ElementwiseMulOp(OpTest):
             'Out',
             no_grad_set=set('Y'),
             check_dygraph=(not self.use_mkldnn),
+            check_prim=True,
         )
 
     def init_input_output(self):
@@ -93,6 +88,9 @@ class ElementwiseMulOp(OpTest):
     def init_axis(self):
         pass
 
+    def if_skip_cinn(self):
+        pass
+
 
 class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
     def init_input_output(self):
@@ -100,6 +98,9 @@ class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
     def init_input_output(self):
@@ -107,6 +108,9 @@ class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
     def init_input_output(self):
@@ -114,11 +118,15 @@ class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
         self.out = np.multiply(self.x, self.y)
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
 class TestBF16ElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.dtype = np.uint16
 
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -137,18 +145,22 @@ class TestBF16ElementwiseMulOp(OpTest):
         }
         self.outputs = {'Out': convert_float_to_uint16(self.out)}
         self.attrs = {'axis': self.axis, 'use_mkldnn': False}
+        self.if_skip_cinn()
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_prim=True)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_prim=True)
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_prim=True)
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 @skip_check_grad_ci(
@@ -157,7 +169,8 @@ class TestBF16ElementwiseMulOp(OpTest):
 class TestElementwiseMulOp_scalar(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.inputs = {
             'X': np.random.rand(10, 3, 4).astype(np.float64),
             'Y': np.random.rand(1).astype(np.float64),
@@ -169,7 +182,8 @@ class TestElementwiseMulOp_scalar(ElementwiseMulOp):
 class TestElementwiseMulOp_Vector(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.inputs = {
             'X': np.random.random((100,)).astype("float64"),
             'Y': np.random.random((100,)).astype("float64"),
@@ -178,86 +192,154 @@ class TestElementwiseMulOp_Vector(ElementwiseMulOp):
         self.init_kernel_type()
 
 
-class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
-    def init_input_output(self):
+class ElementwiseMulOp_broadcast(OpTest):
+    def init_kernel_type(self):
+        self.use_mkldnn = False
+
+    def setUp(self):
+        self.op_type = "elementwise_mul"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
+        self.init_dtype()
+        self.init_kernel_type()
+        self.init_axis()
+        self.init_input_attr_output()
+        self.if_check_prim()
+        self.if_check_dygraph()
+
+    def test_check_output(self):
+        self.check_output(
+            check_dygraph=self.check_dygraph, check_prim=self.check_prim
+        )
+
+    def test_check_grad_normal(self):
+        self.check_grad(
+            ['X', 'Y'],
+            'Out',
+            check_dygraph=self.check_dygraph,
+            check_prim=self.check_prim,
+        )
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'],
+            'Out',
+            no_grad_set=set("X"),
+            check_dygraph=self.check_dygraph,
+            check_prim=self.check_prim,
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'],
+            'Out',
+            no_grad_set=set('Y'),
+            check_dygraph=self.check_dygraph,
+            check_prim=self.check_prim,
+        )
+
+    def init_input_attr_output(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17, 1]).astype(self.dtype)
+        self.y = np.random.uniform(0.1, 1, [17, 17]).astype(self.dtype)
+        self.out = np.multiply(self.x, self.y)
+        self.inputs = {
+            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
+            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
+        }
+        self.outputs = {'Out': self.out}
+        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
+
+    def init_dtype(self):
+        self.dtype = np.float64
+
+    def init_axis(self):
+        self.axis = -1
+
+    def if_check_prim(self):
+        self.check_prim = self.axis == -1
+
+    def if_check_dygraph(self):
+        self.check_dygraph = (not self.use_mkldnn) and (self.axis == -1)
+
+
+class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp_broadcast):
+    def init_input_attr_output(self):
         self.x = np.random.rand(100, 2, 3).astype(self.dtype)
         self.y = np.random.rand(100).astype(self.dtype)
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
         self.out = self.x * self.y.reshape(100, 1, 1)
+        self.inputs = {
+            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
+            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
+        }
+        self.outputs = {'Out': self.out}
+        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
 
     def init_axis(self):
         self.axis = 0
 
 
-class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
+class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp_broadcast):
+    def init_input_attr_output(self):
         self.inputs = {
             'X': np.random.rand(2, 100, 3).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
         }
-        self.attrs = {'axis': 1}
+        self.attrs = {'axis': self.axis}
         self.outputs = {
             'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
         }
-        self.init_kernel_type()
+
+    def init_axis(self):
+        self.axis = 1
 
 
-class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.python_api = broadcast_wrapper(shape=[1, 1, 100])
+class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp_broadcast):
    def init_input_attr_output(self):
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
         }
+        self.attrs = {'axis': self.axis}
         self.outputs = {
             'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
         }
-        self.init_kernel_type()
 
 
-class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])
+class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp_broadcast):
+    def init_input_attr_output(self):
         self.inputs = {
             'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
             'Y': np.random.rand(10, 12).astype(np.float64),
         }
-        self.attrs = {'axis': 1}
+        self.attrs = {'axis': self.axis}
         self.outputs = {
             'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
         }
-        self.init_kernel_type()
+
+    def init_axis(self):
+        self.axis = 1
 
 
-class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp_broadcast):
    def init_input_attr_output(self):
         self.inputs = {
             'X': np.random.rand(10, 2, 11).astype(np.float64),
             'Y': np.random.rand(10, 1, 11).astype(np.float64),
         }
+        self.attrs = {'axis': self.axis}
         self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-        self.init_kernel_type()
 
 
-class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
-    def setUp(self):
-        self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp_broadcast):
    def init_input_attr_output(self):
         self.inputs = {
             'X': np.random.rand(10, 4, 2, 3).astype(np.float64),
             'Y': np.random.rand(10, 4, 1, 3).astype(np.float64),
         }
+        self.attrs = {'axis': self.axis}
         self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-        self.init_kernel_type()
 
 
 @unittest.skipIf(
@@ -267,11 +349,15 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
     def init_dtype(self):
         self.dtype = np.float16
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(1, 1, 100).astype(np.float64),
@@ -283,7 +369,8 @@ class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
 class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.inputs = {
             'X': np.random.rand(30, 3, 1, 5).astype(np.float64),
             'Y': np.random.rand(30, 1, 4, 1).astype(np.float64),
@@ -295,7 +382,8 @@ class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
 class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.prim_op_type = "prim"
+        self.python_api = paddle.multiply
         self.inputs = {
             'X': np.random.rand(10, 10).astype(np.float64),
             'Y': np.random.rand(2, 2, 10, 10).astype(np.float64),
@@ -312,7 +400,7 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
 class TestComplexElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
-        self.python_api = paddle.mul
+        self.python_api = paddle.multiply
         self.init_base_dtype()
         self.init_input_output()
         self.init_grad_input_output()
......
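The old mul tests monkeypatched a fake `paddle.mul` (and a `broadcast_wrapper`) to stand in for the missing `axis` argument; the public 2.0 API is `paddle.multiply`, which broadcasts by numpy rules only. That is why the new `ElementwiseMulOp_broadcast` base disables dygraph and prim checks whenever `axis != -1`. A small illustration, assuming an installed Paddle:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.rand(2, 3, 100))
y = paddle.to_tensor(np.random.rand(100))
# Trailing-dimension broadcast: this is the only alignment paddle.multiply
# can express, i.e. the axis=-1 case of the legacy op.
out = paddle.multiply(x, y)
print(out.shape)  # [2, 3, 100]
```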
@@ -21,80 +21,115 @@ import paddle
 import paddle.fluid as fluid
 
 
-def sub_wrapper(shape=None):
-    def inner_wrapper(x, y, axis=-1):
-        if shape is None:
-            return x - y
-        else:
-            return x - y.reshape(shape)
-
-    return inner_wrapper
-
-
 class TestElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+        self.if_skip_cinn()
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_prim=self.check_prim)
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_prim=self.check_prim,
         )
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_prim=self.check_prim,
         )
 
+    def if_check_prim(self):
+        self.check_prim = True
+
+    def if_skip_cinn(self):
+        pass
+
 
 class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.uniform(0.1, 1, []).astype("float64"),
             'Y': np.random.uniform(0.1, 1, []).astype("float64"),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+        self.if_skip_cinn()
+
+    def if_check_prim(self):
+        self.check_prim = True
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, []).astype("float64"),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+        self.if_skip_cinn()
+
+    def if_check_prim(self):
+        self.check_prim = True
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.uniform(0.1, 1, []).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+        self.if_skip_cinn()
+
+    def if_check_prim(self):
+        self.check_prim = True
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 class TestBF16ElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.dtype = np.uint16
         x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
         y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
@@ -105,18 +140,30 @@ class TestBF16ElementwiseOp(OpTest):
             'Y': convert_float_to_uint16(y),
         }
         self.outputs = {'Out': convert_float_to_uint16(out)}
+        self.if_check_prim()
+        self.if_skip_cinn()
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_prim=self.check_prim)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad(
+            ['Y'], 'Out', no_grad_set=set("X"), check_prim=self.check_prim
+        )
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(
+            ['X'], 'Out', no_grad_set=set('Y'), check_prim=self.check_prim
+        )
+
+    def if_check_prim(self):
+        self.check_prim = True
+
+    def if_skip_cinn(self):
+        self.enable_cinn = False
 
 
 @skip_check_grad_ci(
@@ -125,29 +172,33 @@ class TestBF16ElementwiseOp(OpTest):
 class TestElementwiseSubOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(10, 3, 4).astype(np.float64),
             'Y': np.random.rand(1).astype(np.float64),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
 
 
 class TestElementwiseSubOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.random((100,)).astype("float64"),
             'Y': np.random.random((100,)).astype("float64"),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
 
 
-class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
+class TestElementwiseSubOp_broadcast_O(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper(shape=[100, 1, 1])
+        self.python_api = paddle.subtract
         self.inputs = {
             'X': np.random.rand(100, 3, 2).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -158,11 +209,35 @@ class TestElementwiseSubOp_broadcast_O(TestElementwiseOp):
             'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
         }
 
+    def test_check_output(self):
+        self.check_output(check_dygraph=False)
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
+            check_dygraph=False,
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
+            check_dygraph=False,
+        )
+
 
-class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
+class TestElementwiseSubOp_broadcast_1(TestElementwiseSubOp_broadcast_O):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper(shape=[1, 100, 1])
+        self.python_api = paddle.subtract
         self.inputs = {
             'X': np.random.rand(2, 100, 3).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -177,7 +252,8 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
 class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper(shape=[1, 1, 100])
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(100).astype(np.float64),
@@ -186,12 +262,16 @@ class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
         self.outputs = {
             'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
         }
+        self.if_check_prim()
+
+    def if_check_prim(self):
+        self.check_prim = True
 
 
-class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
+class TestElementwiseSubOp_broadcast_3(TestElementwiseSubOp_broadcast_O):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper(shape=[1, 10, 12, 1])
+        self.python_api = paddle.subtract
         self.inputs = {
             'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
             'Y': np.random.rand(10, 12).astype(np.float64),
@@ -206,60 +286,76 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseSubOp_broadcast_O):
 class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(2, 5, 3, 12).astype(np.float64),
             'Y': np.random.rand(2, 5, 1, 12).astype(np.float64),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+
+    def if_check_prim(self):
+        self.check_prim = True
 
 
 class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(2, 3, 100).astype(np.float64),
             'Y': np.random.rand(1, 1, 100).astype(np.float64),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+
+    def if_check_prim(self):
+        self.check_prim = True
 
 
 class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(10, 3, 1, 4).astype(np.float64),
             'Y': np.random.rand(10, 1, 12, 1).astype(np.float64),
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
+        self.if_check_prim()
+
+    def if_check_prim(self):
+        self.check_prim = True
 
 
 class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_sub"
-
-        def sub_func(x, y, axis=2):
-            return x.reshape([1, 1, 10, 12]) - y
-
-        self.python_api = sub_func
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.rand(10, 12).astype(np.float64),
             'Y': np.random.rand(2, 3, 10, 12).astype(np.float64),
         }
         self.attrs = {'axis': 2}
         self.outputs = {
             'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
         }
+        self.if_check_prim()
+
+    def if_check_prim(self):
+        self.check_prim = True
 
 
 class TestComplexElementwiseSubOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
-        self.python_api = sub_wrapper()
+        self.python_api = paddle.subtract
+        self.prim_op_type = "prim"
         self.dtype = np.float64
         self.shape = (2, 3, 4, 5)
         self.init_input_output()
@@ -271,6 +367,8 @@ class TestComplexElementwiseSubOp(OpTest):
         }
         self.attrs = {'axis': -1, 'use_mkldnn': False}
         self.outputs = {'Out': self.out}
+        self.if_check_prim()
+        self.if_skip_cinn()
 
     def init_base_dtype(self):
         self.dtype = np.float64
@@ -300,6 +398,7 @@ class TestComplexElementwiseSubOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_prim=self.check_prim,
         )
 
     def test_check_grad_ingore_x(self):
@@ -309,6 +408,7 @@ class TestComplexElementwiseSubOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_prim=self.check_prim,
         )
 
     def test_check_grad_ingore_y(self):
@@ -318,8 +418,15 @@ class TestComplexElementwiseSubOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
+            check_prim=self.check_prim,
         )
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
+    def if_check_prim(self):
+        self.check_prim = True
+
 
 class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
     def init_input_output(self):
@@ -336,6 +443,12 @@ class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
         self.grad_x = np.real(self.grad_out)
         self.grad_y = -self.grad_out
 
+    def if_skip_cinn(self):
+        self.enable_cinn = False
+
+    def if_check_prim(self):
+        self.check_prim = False
+
 
 class TestSubtractApi(unittest.TestCase):
     def _executed_api(self, x, y, name=None):
......
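For the complex subtraction cases, the convention encoded above is `grad_x = grad_out` and `grad_y = -grad_out`; when `X` is real and `Y` complex (`TestRealComplexElementwiseSubOp`), only the real part of the upstream gradient flows back to `X`. A numpy restatement of those user-defined gradients:

```python
import numpy as np

grad_out = np.ones([2, 3, 4, 5]) + 1j * np.ones([2, 3, 4, 5])
grad_x = np.real(grad_out)  # X real: keep only the real component
grad_y = -grad_out          # d(x - y)/dy = -1
```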
@@ -490,16 +490,36 @@ def _elementwise_op(helper):
     assert x is not None, 'x cannot be None in {}'.format(original_op_type)
     assert y is not None, 'y cannot be None in {}'.format(original_op_type)
 
+    bf16_and_complex_supported_ops = [
+        "elementwise_add",
+        "elementwise_sub",
+        "elementwise_mul",
+        "elementwise_div",
+    ]
+    if original_op_type in bf16_and_complex_supported_ops:
+        data_type = [
+            'uint16',
+            'float16',
+            'float32',
+            'float64',
+            'int32',
+            'int64',
+            'bool',
+            'complex64',
+            'complex128',
+        ]
+    else:
+        data_type = ['float16', 'float32', 'float64', 'int32', 'int64', 'bool']
     check_variable_and_dtype(
         x,
         'x',
-        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
+        data_type,
         original_op_type,
     )
     check_variable_and_dtype(
         y,
         'y',
-        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
+        data_type,
         original_op_type,
     )
......