diff --git a/paddle/fluid/operators/controlflow/logical_op.cc b/paddle/fluid/operators/controlflow/logical_op.cc
index fb8cde70f5324f42fbc05fdfd65b548e0e58206a..285b17d4995dbc6830035493a264ad9cd1f81d47 100644
--- a/paddle/fluid/operators/controlflow/logical_op.cc
+++ b/paddle/fluid/operators/controlflow/logical_op.cc
@@ -1,11 +1,8 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -26,15 +23,16 @@ class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     OpComment comment;
     AddInput("X", string::Sprintf("Left hand operand of %s operator. Must be "
-                                  "a Variable of type bool.",
+                                  "a Variable of type being one of bool, int8, "
+                                  "int16, int32, int64, float32, float64.",
                                   comment.type));
     AddInput("Y", string::Sprintf("Right hand operand of %s operator. Must be "
-                                  "a Variable of type bool.",
+                                  "a Variable of type being one of bool, int8, "
+                                  "int16, int32, int64, float32, float64.",
                                   comment.type));
     AddOutput("Out", string::Sprintf("n-dim bool Variable"));
     AddComment(string::Sprintf(R"DOC(%s Operator
-
-It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
+It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim LoDTensor or Tensor.
 Each element of Out is calculated by %s
 )DOC",
                                comment.type, comment.equation));
@@ -46,13 +44,14 @@ class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     OpComment comment;
-    AddInput("X", string::Sprintf("Operand of %s operator. Must be "
-                                  "a LoDTensor or Tensor of type bool.",
-                                  comment.type));
+    AddInput("X",
+             string::Sprintf("Operand of %s operator. Must be "
+                             "a LoDTensor or Tensor of type being one of bool, "
+                             "int8, int16, int32, int64, float32, float64.",
+                             comment.type));
     AddOutput("Out", string::Sprintf("n-dim bool LoDTensor or Tensor."));
     AddComment(string::Sprintf(R"DOC(%s Operator
-
-It operates element-wise on X, and returns the Out. X and Out are N-dim boolean LoDTensor or Tensor.
+It operates element-wise on X, and returns the Out. X and Out are N-dim LoDTensor or Tensor.
 Each element of Out is calculated by %s
 )DOC",
                                comment.type, comment.equation));
diff --git a/paddle/fluid/operators/controlflow/logical_op.cu b/paddle/fluid/operators/controlflow/logical_op.cu
index 6cbcd516e08264499afdea00d081ae93eb8b319b..301b4c4149fad3be6ae121b7985e61dd42ef7c36 100644
--- a/paddle/fluid/operators/controlflow/logical_op.cu
+++ b/paddle/fluid/operators/controlflow/logical_op.cu
@@ -1,11 +1,8 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
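The proto-maker changes above only broaden the documented input types; the output stays a bool Variable. A minimal dygraph sketch of the intended user-visible behavior (assuming a Paddle build that includes this patch; non-zero elements are treated as True):

```python
import paddle

x = paddle.to_tensor([0.0, 2.5, -1.0], dtype='float32')
y = paddle.to_tensor([0.0, 3.0, 3.0], dtype='float32')

# Elements are interpreted as booleans (non-zero -> True); the result dtype is bool.
out = paddle.logical_and(x, y)
print(out)  # [False, True, True]

# Operands with different dtypes are rejected (see the _logical_op change in nn.py below).
```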
@@ -21,13 +18,13 @@ namespace plat = paddle::platform;
 namespace paddle {
 namespace operators {
-#define LOGICAL_BINARY_FUNCTOR(func_name, op)                          \
-  template <typename T>                                                \
-  struct func_name {                                                   \
-    using ELEMENT_TYPE = T;                                            \
-    HOSTDEVICE bool operator()(const T* args) const {                  \
-      return args[0] op args[1];                                       \
-    }                                                                  \
+#define LOGICAL_BINARY_FUNCTOR(func_name, op)                           \
+  template <typename T>                                                 \
+  struct func_name {                                                    \
+    using ELEMENT_TYPE = T;                                             \
+    HOSTDEVICE bool operator()(const T* args) const {                   \
+      return static_cast<bool>(args[0]) op static_cast<bool>(args[1]);  \
+    }                                                                   \
   };
 LOGICAL_BINARY_FUNCTOR(CudaOrFunctor, ||)
@@ -68,10 +65,16 @@ class BinaryLogicalOpKernel
 }  // namespace operators
 }  // namespace paddle
-#define REGISTER_LOGICAL_CUDA_KERNEL(op_name, func)                        \
-  REGISTER_OP_CUDA_KERNEL(                                                 \
-      op_name,                                                             \
-      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<bool>>);
+#define REGISTER_LOGICAL_CUDA_KERNEL(op_name, func)                        \
+  REGISTER_OP_CUDA_KERNEL(                                                 \
+      op_name,                                                             \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<bool>>,     \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<int8_t>>,   \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<int16_t>>,  \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<int>>,      \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<int64_t>>,  \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<float>>,    \
+      ops::BinaryLogicalOpKernel<plat::CUDADeviceContext, func<double>>);
 REGISTER_LOGICAL_CUDA_KERNEL(logical_or, CudaOrFunctor)
 REGISTER_LOGICAL_CUDA_KERNEL(logical_and, CudaAndFunctor)
diff --git a/paddle/fluid/operators/controlflow/logical_op.h b/paddle/fluid/operators/controlflow/logical_op.h
index 2c39201a426a25bb8595f415d80192080f1f1931..92fe0a10cb907c333954f51b06a199a6c23cffbe 100644
--- a/paddle/fluid/operators/controlflow/logical_op.h
+++ b/paddle/fluid/operators/controlflow/logical_op.h
@@ -1,11 +1,8 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
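The important semantic detail in the rewritten CUDA functor is the cast: each operand is converted to bool before the logical operator is applied, so any non-zero element behaves as True. A rough NumPy model of that element-wise semantics (illustrative only, not Paddle code):

```python
import numpy as np

def logical_binary(np_op, x, y):
    # Mirror the functor: cast element-wise to bool, then apply the operator.
    return np_op(x.astype(bool), y.astype(bool))

a = np.array([0, 3, -2], dtype=np.int32)
b = np.array([1, 0, 5], dtype=np.int32)
print(logical_binary(np.logical_and, a, b))  # [False False  True]
print(logical_binary(np.logical_or, a, b))   # [ True  True  True]
```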
@@ -82,12 +79,36 @@ class UnaryLogicalOpKernel
 }  // namespace operators
 }  // namespace paddle
-#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor)              \
-  REGISTER_OP_##dev##_KERNEL(                                              \
-      op_type, ::paddle::operators::BinaryLogicalOpKernel<                 \
-                   ::paddle::platform::dev##DeviceContext, functor>);
+#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor)               \
+  REGISTER_OP_##dev##_KERNEL(                                               \
+      op_type, ::paddle::operators::BinaryLogicalOpKernel<                  \
+                   ::paddle::platform::dev##DeviceContext, functor<bool>>,  \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<int8_t>>,         \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<int16_t>>,        \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<int>>,            \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<int64_t>>,        \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<float>>,          \
+      ::paddle::operators::BinaryLogicalOpKernel<                           \
+          ::paddle::platform::dev##DeviceContext, functor<double>>);
-#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor)               \
-  REGISTER_OP_##dev##_KERNEL(                                              \
-      op_type, ::paddle::operators::UnaryLogicalOpKernel<                  \
-                   ::paddle::platform::dev##DeviceContext, functor>);
+#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor)                \
+  REGISTER_OP_##dev##_KERNEL(                                               \
+      op_type, ::paddle::operators::UnaryLogicalOpKernel<                   \
+                   ::paddle::platform::dev##DeviceContext, functor<bool>>,  \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<int8_t>>,         \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<int16_t>>,        \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<int>>,            \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<int64_t>>,        \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<float>>,          \
+      ::paddle::operators::UnaryLogicalOpKernel<                            \
+          ::paddle::platform::dev##DeviceContext, functor<double>>);
diff --git a/paddle/fluid/operators/controlflow/logical_op_npu.cc b/paddle/fluid/operators/controlflow/logical_op_npu.cc
index b452bee747232d67389bd253de4802ee02cae811..babdb2257ee3ca59aab7a378da5ef07b3023ca9f 100644
--- a/paddle/fluid/operators/controlflow/logical_op_npu.cc
+++ b/paddle/fluid/operators/controlflow/logical_op_npu.cc
@@ -1,11 +1,8 @@
 /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
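With the widened REGISTER_BINARY_LOGICAL_KERNEL / REGISTER_UNARY_LOGICAL_KERNEL macros above, the CPU kernels are instantiated once per supported element type and selected by the input dtype at run time, while the output tensor remains bool. A quick dygraph check on CPU (a sketch; assumes this patch is applied):

```python
import paddle

paddle.set_device('cpu')  # exercise the CPU registrations
x = paddle.to_tensor([0, 1, -3], dtype='int64')
res = paddle.logical_not(x)
print(res.dtype)  # bool, regardless of the input dtype
print(res)        # [True, False, False]
```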
@@ -82,11 +79,29 @@ class LogicalAndPUKernel : public framework::OpKernel {
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
-REGISTER_OP_NPU_KERNEL(logical_not,
-                       ops::LogicalNotNPUKernel);
+REGISTER_OP_NPU_KERNEL(
+    logical_not, ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel,
+    ops::LogicalNotNPUKernel);
 REGISTER_OP_NPU_KERNEL(logical_or,
-                       ops::LogicalOrNPUKernel);
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel,
+                       ops::LogicalOrNPUKernel);
 REGISTER_OP_NPU_KERNEL(logical_and,
-                       ops::LogicalAndPUKernel);
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel,
+                       ops::LogicalAndPUKernel);
diff --git a/paddle/fluid/operators/controlflow/logical_op_xpu.h b/paddle/fluid/operators/controlflow/logical_op_xpu.h
index 9d46ad8c0447ff1d860390945143cf18a423af05..aef6ae27a3194574794baa02ec2733ab302a069e 100644
--- a/paddle/fluid/operators/controlflow/logical_op_xpu.h
+++ b/paddle/fluid/operators/controlflow/logical_op_xpu.h
@@ -45,7 +45,7 @@ class BinaryLogicalOpXPUKernel : public framework::OpKernel {
     auto* x = context.Input("X");
     auto* y = context.Input("Y");
     auto* out = context.Output("Out");
-    T* out_ptr = out->mutable_data<T>(context.GetPlace());
+    bool* out_ptr = out->mutable_data<bool>(context.GetPlace());
     const T* x_ptr = x->data();
     const T* y_ptr = y->data();
     auto& dev_ctx =
@@ -153,7 +153,7 @@ class UnaryLogicalOpXPUKernel : public framework::OpKernel {
     if (x->numel() == 0) {
       return;
     }
-    out->mutable_data<T>(context.GetPlace());
+    out->mutable_data<bool>(context.GetPlace());
     auto& dev_ctx =
         context.template device_context();
     int ret = xpu::logical_not(dev_ctx.x_context(), x->data(),
diff --git a/paddle/fluid/operators/controlflow/logicaland_op_xpu.cc b/paddle/fluid/operators/controlflow/logicaland_op_xpu.cc
index 08927e66f250644c4f1dba59d535e9648354f91d..6248b6e0b063781eea00cb0f578a913914ee80d8 100644
--- a/paddle/fluid/operators/controlflow/logicaland_op_xpu.cc
+++ b/paddle/fluid/operators/controlflow/logicaland_op_xpu.cc
@@ -17,5 +17,11 @@ limitations under the License. */
 namespace ops = paddle::operators;
 REGISTER_OP_XPU_KERNEL(
     logical_and,
-    ops::BinaryLogicalOpXPUKernel);
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel);
 #endif
diff --git a/paddle/fluid/operators/controlflow/logicalnot_op_xpu.cc b/paddle/fluid/operators/controlflow/logicalnot_op_xpu.cc
old mode 100755
new mode 100644
index a8cef52ace2c601c61937cb9804793d5bbc25e4e..be857db8aa9669a0904caac9566c652d947b70b4
--- a/paddle/fluid/operators/controlflow/logicalnot_op_xpu.cc
+++ b/paddle/fluid/operators/controlflow/logicalnot_op_xpu.cc
@@ -15,5 +15,11 @@ limitations under the License.
 */
 #ifdef PADDLE_WITH_XPU
 #include "paddle/fluid/operators/controlflow/logical_op_xpu.h"
 namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(logicalnot, ops::UnaryLogicalOpXPUKernel);
+REGISTER_OP_XPU_KERNEL(logicalnot, ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel,
+                       ops::UnaryLogicalOpXPUKernel);
 #endif
diff --git a/paddle/fluid/operators/controlflow/logicalor_op_xpu.cc b/paddle/fluid/operators/controlflow/logicalor_op_xpu.cc
index e99c2f1a181040615554c36e113893e43010e4f8..126596841a29f8a1796e828dd9549db163de8512 100644
--- a/paddle/fluid/operators/controlflow/logicalor_op_xpu.cc
+++ b/paddle/fluid/operators/controlflow/logicalor_op_xpu.cc
@@ -18,5 +18,11 @@ limitations under the License. */
 namespace ops = paddle::operators;
 REGISTER_OP_XPU_KERNEL(
     logical_or,
-    ops::BinaryLogicalOpXPUKernel);
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel,
+    ops::BinaryLogicalOpXPUKernel);
 #endif
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 71ea085d3810844bf10fc616c4d5078921bcf211..cebb5e77ac636f1d7aa9509989080b416afe2a8f 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12147,17 +12147,22 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-
-    check_variable_and_dtype(x, "x", ["bool"], op_name)
+    check_variable_and_dtype(x, "x", [
+        "bool", "int8", "int16", "int32", "int64", "float32", "float64"
+    ], op_name)
     if y is not None:
-        check_variable_and_dtype(y, "y", ["bool"], op_name)
+        check_variable_and_dtype(y, "y", [
+            "bool", "int8", "int16", "int32", "int64", "float32", "float64"
+        ], op_name)
     if out is not None:
         check_type(out, "out", Variable, op_name)
     helper = LayerHelper(op_name, **locals())
-    if binary_op:
-        assert x.dtype == y.dtype
+    if binary_op and x.dtype != y.dtype:
+        raise ValueError(
+            "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
+            % (op_name, x.dtype, y.dtype))
     if out is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -12175,7 +12180,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
 def logical_and(x, y, out=None, name=None):
     r"""
-    ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
+    ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
     Each element of ``out`` is calculated by

     .. math::

         out = x \&\& y
@@ -12186,8 +12191,8 @@ def logical_and(x, y, out=None, name=None):
     ``paddle.logical_and`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

     Args:
-        x (Tensor): the input tensor, it's data type should be bool.
-        y (Tensor): the input tensor, it's data type should be bool.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -12211,7 +12216,7 @@ def logical_and(x, y, out=None, name=None):
 def logical_or(x, y, out=None, name=None):
     """
-    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
+    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
     Each element of ``out`` is calculated by

     .. math::

         out = x || y
@@ -12222,8 +12227,8 @@ def logical_or(x, y, out=None, name=None):
     ``paddle.logical_or`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

     Args:
-        x (Tensor): the input tensor, it's data type should be bool.
-        y (Tensor): the input tensor, it's data type should be bool.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
         out(Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -12250,7 +12255,7 @@ def logical_or(x, y, out=None, name=None):
 def logical_xor(x, y, out=None, name=None):
     r"""
-    ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
+    ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
     Each element of ``out`` is calculated by

     .. math::

         out = (x || y) \&\& !(x \&\& y)
@@ -12261,8 +12266,8 @@ def logical_xor(x, y, out=None, name=None):
     ``paddle.logical_xor`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

     Args:
-        x (Tensor): the input tensor, it's data type should be bool.
-        y (Tensor): the input tensor, it's data type should be bool.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -12290,7 +12295,7 @@ def logical_xor(x, y, out=None, name=None):
 def logical_not(x, out=None, name=None):
     """
-    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``.
+    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Variable``.
     Each element of ``out`` is calculated by

     .. math::
@@ -12298,7 +12303,7 @@
         out = !x

     Args:
-        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool.
+        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float32, or float64.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor` will be created to save the output.
         name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
diff --git a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py
index f695eeb0f27743144bb67486d6435fcc21604617..f5f0a23d81a5034ee0c72435eae052c56c5538ed 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py
@@ -23,6 +23,10 @@ import paddle
 import paddle.fluid as fluid
 from paddle.static import Program, program_guard

+SUPPORTED_DTYPES = [
+    bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
+]
+
 TEST_META_OP_DATA = [{
     'op_str': 'logical_and',
     'binary_op': True
@@ -110,13 +114,13 @@ def run_static(x_np, y_np, op_str, use_npu=False, binary_op=True):
         place = paddle.NPUPlace(0)
     exe = fluid.Executor(place)
     with fluid.program_guard(main_program, startup_program):
-        x = paddle.static.data(name='x', shape=x_np.shape, dtype='bool')
+        x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
         op = getattr(paddle, op_str)
         feed_list = {'x': x_np}
         if not binary_op:
             res = op(x)
         else:
-            y = paddle.static.data(name='y', shape=y_np.shape, dtype='bool')
+            y = paddle.static.data(name='y', shape=y_np.shape, dtype=y_np.dtype)
             feed_list['y'] = y_np
             res = op(x, y)
         exe.run(startup_program)
@@ -130,17 +134,20 @@ def run_dygraph(x_np, y_np, op_str, use_npu=False, binary_op=True):
         place = paddle.NPUPlace(0)
     paddle.disable_static(place)
     op = getattr(paddle, op_str)
-    x = paddle.to_tensor(x_np)
+    x = paddle.to_tensor(x_np, dtype=x_np.dtype)
     if not binary_op:
         dygraph_result = op(x)
     else:
-        y = paddle.to_tensor(y_np)
+        y = paddle.to_tensor(y_np, dtype=y_np.dtype)
         dygraph_result = op(x, y)
     return dygraph_result

-def np_data_generator(np_shape, *args, **kwargs):
-    return np.random.choice(a=[True, False], size=np_shape).astype(bool)
+def np_data_generator(np_shape, dtype, *args, **kwargs):
+    if dtype == bool:
+        return np.random.choice(a=[True, False], size=np_shape).astype(bool)
+    else:
+        return np.random.randn(*np_shape).astype(dtype)

 def test(unit_test, use_npu=False, test_error=False):
@@ -152,40 +159,46 @@ def test(unit_test, use_npu=False, test_error=False):
     if test_error:
         META_DATA = dict(TEST_META_WRONG_SHAPE_DATA)
     for shape_data in META_DATA.values():
-        meta_data['x_np'] = np_data_generator(shape_data['x_shape'])
-        meta_data['y_np'] = np_data_generator(shape_data['y_shape'])
-        if meta_data['binary_op'] and test_error:
-            # catch C++ Exception
-            unit_test.assertRaises(BaseException, run_static, **meta_data)
-            unit_test.assertRaises(BaseException, run_dygraph, **meta_data)
-            continue
-        static_result = run_static(**meta_data)
-        dygraph_result = run_dygraph(**meta_data)
-        if meta_data['binary_op']:
-            np_result = np_op(meta_data['x_np'], meta_data['y_np'])
-        else:
-            np_result = np_op(meta_data['x_np'])
-        unit_test.assertTrue((static_result == np_result).all())
-        unit_test.assertTrue((dygraph_result.numpy() == np_result).all())
+        for data_type in SUPPORTED_DTYPES:
+            meta_data['x_np'] = np_data_generator(
+
shape_data['x_shape'], dtype=data_type) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type) + if meta_data['binary_op'] and test_error: + # catch C++ Exception + unit_test.assertRaises(BaseException, run_static, + **meta_data) + unit_test.assertRaises(BaseException, run_dygraph, + **meta_data) + continue + static_result = run_static(**meta_data) + dygraph_result = run_dygraph(**meta_data) + if meta_data['binary_op']: + np_result = np_op(meta_data['x_np'], meta_data['y_np']) + else: + np_result = np_op(meta_data['x_np']) + unit_test.assertTrue((static_result == np_result).all()) + unit_test.assertTrue((dygraph_result.numpy() == np_result).all( + )) def test_type_error(unit_test, use_npu, type_str_map): def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) - error_type = TypeError + error_type = ValueError if isinstance(x, np.ndarray): x = paddle.to_tensor(x) y = paddle.to_tensor(y) error_type = BaseException if binary_op: - if type_str_map['x'] != 'bool' or type_str_map['y'] != 'bool': + if type_str_map['x'] != type_str_map['y']: unit_test.assertRaises(error_type, op, x=x, y=y) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, y=y, out=1) else: - if type_str_map['x'] != 'bool': - unit_test.assertRaises(error_type, op, x=x) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, out=1) place = paddle.CPUPlace() @@ -212,12 +225,10 @@ def test_type_error(unit_test, use_npu, type_str_map): def type_map_factory(): - x_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] - y_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] return [{ 'x': x_type, 'y': y_type - } for x_type in x_type_list for y_type in y_type_list] + } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] class TestCPU(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index c8bb8c5b73f7680fc8a329656ef2b899f14d96ea..e77526bdb16bc901f38bb3f901b84ec59a109614 100755 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -21,6 +21,10 @@ import paddle import paddle.fluid as fluid from paddle.static import Program, program_guard +SUPPORTED_DTYPES = [ + bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64 +] + TEST_META_OP_DATA = [{ 'op_str': 'logical_and', 'binary_op': True @@ -111,13 +115,13 @@ def run_static(x_np, y_np, op_str, use_gpu=False, binary_op=True): place = paddle.CUDAPlace(0) exe = fluid.Executor(place) with fluid.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', shape=x_np.shape, dtype='bool') + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) op = getattr(paddle, op_str) feed_list = {'x': x_np} if not binary_op: res = op(x) else: - y = paddle.static.data(name='y', shape=y_np.shape, dtype='bool') + y = paddle.static.data(name='y', shape=y_np.shape, dtype=y_np.dtype) feed_list['y'] = y_np res = op(x, y) exe.run(startup_program) @@ -131,17 +135,20 @@ def run_dygraph(x_np, y_np, op_str, use_gpu=False, binary_op=True): place = paddle.CUDAPlace(0) paddle.disable_static(place) op = getattr(paddle, op_str) - x = paddle.to_tensor(x_np) + x = paddle.to_tensor(x_np, dtype=x_np.dtype) if not binary_op: dygraph_result = op(x) else: - y = paddle.to_tensor(y_np) + y = paddle.to_tensor(y_np, dtype=y_np.dtype) dygraph_result = op(x, y) return 
dygraph_result -def np_data_generator(np_shape, *args, **kwargs): - return np.random.choice(a=[True, False], size=np_shape).astype(bool) +def np_data_generator(np_shape, dtype, *args, **kwargs): + if dtype == bool: + return np.random.choice(a=[True, False], size=np_shape).astype(bool) + else: + return np.random.randn(*np_shape).astype(dtype) def test(unit_test, use_gpu=False, test_error=False): @@ -153,40 +160,46 @@ def test(unit_test, use_gpu=False, test_error=False): if test_error: META_DATA = dict(TEST_META_WRONG_SHAPE_DATA) for shape_data in META_DATA.values(): - meta_data['x_np'] = np_data_generator(shape_data['x_shape']) - meta_data['y_np'] = np_data_generator(shape_data['y_shape']) - if meta_data['binary_op'] and test_error: - # catch C++ Exception - unit_test.assertRaises(BaseException, run_static, **meta_data) - unit_test.assertRaises(BaseException, run_dygraph, **meta_data) - continue - static_result = run_static(**meta_data) - dygraph_result = run_dygraph(**meta_data) - if meta_data['binary_op']: - np_result = np_op(meta_data['x_np'], meta_data['y_np']) - else: - np_result = np_op(meta_data['x_np']) - unit_test.assertTrue((static_result == np_result).all()) - unit_test.assertTrue((dygraph_result.numpy() == np_result).all()) + for data_type in SUPPORTED_DTYPES: + meta_data['x_np'] = np_data_generator( + shape_data['x_shape'], dtype=data_type) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type) + if meta_data['binary_op'] and test_error: + # catch C++ Exception + unit_test.assertRaises(BaseException, run_static, + **meta_data) + unit_test.assertRaises(BaseException, run_dygraph, + **meta_data) + continue + static_result = run_static(**meta_data) + dygraph_result = run_dygraph(**meta_data) + if meta_data['binary_op']: + np_result = np_op(meta_data['x_np'], meta_data['y_np']) + else: + np_result = np_op(meta_data['x_np']) + unit_test.assertTrue((static_result == np_result).all()) + unit_test.assertTrue((dygraph_result.numpy() == np_result).all( + )) def test_type_error(unit_test, use_gpu, type_str_map): def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) - error_type = TypeError + error_type = ValueError if isinstance(x, np.ndarray): x = paddle.to_tensor(x) y = paddle.to_tensor(y) error_type = BaseException if binary_op: - if type_str_map['x'] != 'bool' or type_str_map['y'] != 'bool': + if type_str_map['x'] != type_str_map['y']: unit_test.assertRaises(error_type, op, x=x, y=y) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, y=y, out=1) else: - if type_str_map['x'] != 'bool': - unit_test.assertRaises(error_type, op, x=x) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, out=1) place = paddle.CPUPlace() @@ -213,12 +226,10 @@ def test_type_error(unit_test, use_gpu, type_str_map): def type_map_factory(): - x_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] - y_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] return [{ 'x': x_type, 'y': y_type - } for x_type in x_type_list for y_type in y_type_list] + } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] class TestCPU(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py index 21eb99fcfbf9195a82cf0db1abcc92881b48af44..7e7481bd90646c4aade8663d3103aa8f7a2c28a3 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py +++ 
b/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py @@ -25,6 +25,10 @@ import paddle from op_test_xpu import XPUOpTest from paddle.static import Program, program_guard +SUPPORTED_DTYPES = [ + bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64 +] + TEST_META_OP_DATA = [{ 'op_str': 'logical_and', 'binary_op': True @@ -110,13 +114,13 @@ def run_static_xpu(x_np, y_np, op_str, binary_op=True): place = paddle.XPUPlace(0) exe = fluid.Executor(place) with fluid.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', shape=x_np.shape, dtype='bool') + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) op = getattr(paddle, op_str) feed_list = {'x': x_np} if not binary_op: res = op(x) else: - y = paddle.static.data(name='y', shape=y_np.shape, dtype='bool') + y = paddle.static.data(name='y', shape=y_np.shape, dtype=y_np.dtype) feed_list['y'] = y_np res = op(x, y) exe.run(startup_program) @@ -128,17 +132,20 @@ def run_dygraph_xpu(x_np, y_np, op_str, binary_op=True): place = paddle.XPUPlace(0) paddle.disable_static(place) op = getattr(paddle, op_str) - x = paddle.to_tensor(x_np) + x = paddle.to_tensor(x_np, dtype=x_np.dtype) if not binary_op: dygraph_result = op(x) else: - y = paddle.to_tensor(y_np) + y = paddle.to_tensor(y_np, dtype=y_np.dtype) dygraph_result = op(x, y) return dygraph_result -def np_data_generator(np_shape, *args, **kwargs): - return np.random.choice(a=[True, False], size=np_shape).astype(bool) +def np_data_generator(np_shape, dtype, *args, **kwargs): + if dtype == bool: + return np.random.choice(a=[True, False], size=np_shape).astype(bool) + else: + return np.random.randn(*np_shape).astype(dtype) def test_xpu(unit_test, test_error=False): @@ -149,40 +156,44 @@ def test_xpu(unit_test, test_error=False): if test_error: META_DATA = dict(TEST_META_WRONG_SHAPE_DATA) for shape_data in META_DATA.values(): - meta_data['x_np'] = np_data_generator(shape_data['x_shape']) - meta_data['y_np'] = np_data_generator(shape_data['y_shape']) - if meta_data['binary_op'] and test_error: - # catch C++ Exception - unit_test.assertRaises(BaseException, run_static_xpu, - **meta_data) - continue - static_result = run_static_xpu(**meta_data) - dygraph_result = run_dygraph_xpu(**meta_data) - if meta_data['binary_op']: - np_result = np_op(meta_data['x_np'], meta_data['y_np']) - else: - np_result = np_op(meta_data['x_np']) - unit_test.assertTrue((static_result == np_result).all()) - unit_test.assertTrue((dygraph_result.numpy() == np_result).all()) + for data_type in SUPPORTED_DTYPES: + meta_data['x_np'] = np_data_generator( + shape_data['x_shape'], dtype=data_type) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type) + if meta_data['binary_op'] and test_error: + # catch C++ Exception + unit_test.assertRaises(BaseException, run_static_xpu, + **meta_data) + continue + static_result = run_static_xpu(**meta_data) + dygraph_result = run_dygraph_xpu(**meta_data) + if meta_data['binary_op']: + np_result = np_op(meta_data['x_np'], meta_data['y_np']) + else: + np_result = np_op(meta_data['x_np']) + unit_test.assertTrue((static_result == np_result).all()) + unit_test.assertTrue((dygraph_result.numpy() == np_result).all( + )) def test_type_error(unit_test, type_str_map): def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) - error_type = TypeError + error_type = ValueError if isinstance(x, np.ndarray): x = paddle.to_tensor(x) y = paddle.to_tensor(y) error_type = BaseException if binary_op: - if 
type_str_map['x'] != 'bool' or type_str_map['y'] != 'bool': + if type_str_map['x'] != type_str_map['y']: unit_test.assertRaises(error_type, op, x=x, y=y) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, y=y, out=1) else: - if type_str_map['x'] != 'bool': - unit_test.assertRaises(error_type, op, x=x) if not fluid.in_dygraph_mode(): + error_type = TypeError unit_test.assertRaises(error_type, op, x=x, out=1) place = paddle.XPUPlace(0) @@ -208,12 +219,10 @@ def test_type_error(unit_test, type_str_map): def type_map_factory(): - x_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] - y_type_list = ['float32', 'float64', 'int32', 'int64', 'bool'] return [{ 'x': x_type, 'y': y_type - } for x_type in x_type_list for y_type in y_type_list] + } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] @unittest.skipIf(not paddle.is_compiled_with_xpu(),