未验证 提交 5cbf5bd4 编写于 作者: iSerendipity's avatar iSerendipity 提交者: GitHub

【complex op】No.6 add complex support for logical_and/or/xor/not (#56323)

* 【complex op】No.6 add complex support for logical_and/or/xor/not

* fix dtype check

* modify the docs

* add special condition for not raise when x.dtype is complex

* add random generate for complex dtype

* fix generate for complex

* fix

* fix

* add corner case for complex type

* fix ut

* fix ut
上级 fc1e505e
......@@ -77,6 +77,8 @@ void LogicalNotKernel(const Context& dev_ctx,
int64_t, \
int, \
int8_t, \
phi::dtype::complex<float>, \
phi::dtype::complex<double>, \
int16_t) {}
REGISTER_LOGICAL_CPU_KERNEL(logical_and, And)
......
......@@ -97,6 +97,8 @@ PD_REGISTER_KERNEL(logical_xor, KPS, ALL_LAYOUT, phi::LogicalXorKernel, int) {
int64_t, \
int, \
int8_t, \
phi::dtype::complex<float>, \
phi::dtype::complex<double>, \
int16_t) {}
REGISTER_LOGICAL_CUDA_KERNEL(logical_and, And)
......
......@@ -53,6 +53,8 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
"float32",
"float64",
"uint16",
"complex64",
"complex128",
],
op_name,
)
......@@ -70,6 +72,8 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
"float32",
"float64",
"uint16",
"complex64",
"complex128",
],
op_name,
)
......@@ -114,8 +118,8 @@ def logical_and(x, y, out=None, name=None):
.. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
Args:
x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
......@@ -173,8 +177,8 @@ def logical_or(x, y, out=None, name=None):
.. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
Args:
x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
out(Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
......@@ -234,8 +238,8 @@ def logical_xor(x, y, out=None, name=None):
.. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
Args:
x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64.
y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64.
x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
......@@ -296,7 +300,7 @@ def logical_not(x, out=None, name=None):
.. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
Args:
x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float16, float32, or float64.
x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float16, float32, float64, complex64 or complex128.
out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
......
......@@ -31,6 +31,8 @@ SUPPORTED_DTYPES = [
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
]
TEST_META_OP_DATA = [
......@@ -124,6 +126,10 @@ def np_data_generator(np_shape, dtype, *args, **kwargs):
elif dtype == np.uint16:
x = np.random.uniform(0.0, 1.0, np_shape).astype(np.float32)
return convert_float_to_uint16(x)
elif dtype == np.complex64 or dtype == np.complex128:
return np.random.normal(0, 1, np_shape).astype(dtype) + (
1.0j * np.random.normal(0, 1, np_shape)
).astype(dtype)
else:
return np.random.normal(0, 1, np_shape).astype(dtype)
......@@ -169,6 +175,41 @@ def test(unit_test, use_gpu=False, test_error=False):
(dygraph_result.numpy() == np_result).all()
)
unit_test.assertTrue((eager_result.numpy() == np_result).all())
# add some corner case for complex datatype
for complex_data_type in [np.complex64, np.complex128]:
for x_data in (0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j):
for y_data in (0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j):
meta_data['x_np'] = (
x_data * np.ones(shape_data['x_shape'])
).astype(complex_data_type)
meta_data['y_np'] = (
y_data * np.ones(shape_data['y_shape'])
).astype(complex_data_type)
if meta_data['binary_op'] and test_error:
# catch C++ Exception
unit_test.assertRaises(
BaseException, run_static, **meta_data
)
unit_test.assertRaises(
BaseException, run_dygraph, **meta_data
)
continue
static_result = run_static(**meta_data)
dygraph_result = run_dygraph(**meta_data)
eager_result = run_eager(**meta_data)
if meta_data['binary_op']:
np_result = np_op(
meta_data['x_np'], meta_data['y_np']
)
else:
np_result = np_op(meta_data['x_np'])
unit_test.assertTrue((static_result == np_result).all())
unit_test.assertTrue(
(dygraph_result.numpy() == np_result).all()
)
unit_test.assertTrue(
(eager_result.numpy() == np_result).all()
)
def test_type_error(unit_test, use_gpu, type_str_map):
......@@ -180,7 +221,9 @@ def test_type_error(unit_test, use_gpu, type_str_map):
y = paddle.to_tensor(y)
error_type = BaseException
if binary_op:
if type_str_map['x'] != type_str_map['y']:
if type_str_map['x'] != type_str_map['y'] and type_str_map[
'x'
] not in [np.complex64, np.complex128]:
unit_test.assertRaises(error_type, op, x=x, y=y)
if not in_dynamic_mode():
error_type = TypeError
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册