From 6fa2813945f1e4783d9c9c9c801b781e3a1dd640 Mon Sep 17 00:00:00 2001
From: zhaozhenlong
Date: Tue, 26 May 2020 20:15:52 +0800
Subject: [PATCH] add op BitwiseAnd BitwiseOr BitwiseXor

---
 mindspore/common/dtype.py                 |  1 +
 mindspore/ops/_op_impl/tbe/__init__.py    |  3 +
 mindspore/ops/_op_impl/tbe/bitwise_and.py | 37 ++++++++++
 mindspore/ops/_op_impl/tbe/bitwise_or.py  | 37 ++++++++++
 mindspore/ops/_op_impl/tbe/bitwise_xor.py | 37 ++++++++++
 mindspore/ops/operations/__init__.py      |  7 +-
 mindspore/ops/operations/math_ops.py      | 83 ++++++++++++++++++++++-
 tests/ut/python/ops/test_ops.py           | 30 ++++++++
 8 files changed, 232 insertions(+), 3 deletions(-)
 create mode 100644 mindspore/ops/_op_impl/tbe/bitwise_and.py
 create mode 100644 mindspore/ops/_op_impl/tbe/bitwise_or.py
 create mode 100644 mindspore/ops/_op_impl/tbe/bitwise_xor.py

diff --git a/mindspore/common/dtype.py b/mindspore/common/dtype.py
index e6b9779f3..e9bf84057 100644
--- a/mindspore/common/dtype.py
+++ b/mindspore/common/dtype.py
@@ -109,6 +109,7 @@ number_type = (int8,
                float64,)
 
 int_type = (int8, int16, int32, int64,)
+uint_type = (uint8, uint16, uint32, uint64)
 float_type = (float16, float32, float64,)
 
 _simple_types = {
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index 6051b4e1f..7fc9f4d29 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -202,3 +202,6 @@ from .scatter_add import _scatter_add_tbe
 from .atan2 import _atan2_tbe
 from .batch_to_space_nd import _batch_to_space_nd_tbe
 from .space_to_batch_nd import _space_to_batch_nd_tbe
+from .bitwise_and import bitwise_and_op_info
+from .bitwise_or import bitwise_or_op_info
+from .bitwise_xor import bitwise_xor_op_info
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_and.py b/mindspore/ops/_op_impl/tbe/bitwise_and.py
new file mode 100644
index 000000000..30a79e524
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/bitwise_and.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""BitwiseAnd op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+bitwise_and_op_info = TBERegOp("BitwiseAnd") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("bitwise_and.so") \
+    .compute_cost(10) \
+    .kernel_name("bitwise_and") \
+    .partial_flag(True) \
+    .input(0, "x1", False, "required", "all") \
+    .input(1, "x2", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
+    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
+    .get_op_info()
+
+
+@op_info_register(bitwise_and_op_info)
+def _bitwise_and_tbe():
+    """BitwiseAnd TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_or.py b/mindspore/ops/_op_impl/tbe/bitwise_or.py
new file mode 100644
index 000000000..a80bc908b
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/bitwise_or.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""BitwiseOr op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+bitwise_or_op_info = TBERegOp("BitwiseOr") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("bitwise_or.so") \
+    .compute_cost(10) \
+    .kernel_name("bitwise_or") \
+    .partial_flag(True) \
+    .input(0, "x1", False, "required", "all") \
+    .input(1, "x2", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
+    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
+    .get_op_info()
+
+
+@op_info_register(bitwise_or_op_info)
+def _bitwise_or_tbe():
+    """BitwiseOr TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/bitwise_xor.py b/mindspore/ops/_op_impl/tbe/bitwise_xor.py
new file mode 100644
index 000000000..c606877ff
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/bitwise_xor.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""BitwiseXor op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+bitwise_xor_op_info = TBERegOp("BitwiseXor") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("bitwise_xor.so") \
+    .compute_cost(10) \
+    .kernel_name("bitwise_xor") \
+    .partial_flag(True) \
+    .input(0, "x1", False, "required", "all") \
+    .input(1, "x2", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
+    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
+    .get_op_info()
+
+
+@op_info_register(bitwise_xor_op_info)
+def _bitwise_xor_tbe():
+    """BitwiseXor TBE register"""
+    return
diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py
index 379fd5bca..7483ba047 100644
--- a/mindspore/ops/operations/__init__.py
+++ b/mindspore/ops/operations/__init__.py
@@ -39,7 +39,7 @@ from .debug_ops import (ImageSummary, InsertGradientOf, HookBackward, ScalarSummary,
                         TensorSummary, HistogramSummary, Print)
 from .control_ops import ControlDepend, GeSwitch, Merge
 from .inner_ops import ScalarCast
-from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul,
+from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr, BitwiseXor,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
                        Cos, Div, Equal, EqualCount, Exp, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh,
                        Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
@@ -266,7 +266,10 @@ __all__ = [
     "ApplyCenteredRMSProp",
     "SpaceToBatchND",
     "BatchToSpaceND",
-    "SquareSumAll"
+    "SquareSumAll",
+    "BitwiseAnd",
+    "BitwiseOr",
+    "BitwiseXor"
 ]
 
 __all__.extend(thor_ops.__all__)
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index fdea0e367..f45ce4c01 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -71,7 +71,7 @@ class _BinaryOp(PrimitiveWithInfer):
 
     @prim_attr_register
     def __init__(self):
-        """init _MathBinaryOp"""
+        """init _BinaryOp"""
         self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
 
     def infer_shape(self, x_shape, y_shape):
@@ -93,6 +93,27 @@ class _MathBinaryOp(_BinaryOp):
         return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
 
 
+class _BitwiseBinaryOp(_MathBinaryOp):
+    """
+    Define bitwise binary operators.
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """init _BitwiseBinaryOp"""
+        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
+
+    @staticmethod
+    def _check_bitwise_op_input_type(x1_type, x2_type, prim):
+        args = {'x1': x1_type, 'x2': x2_type}
+        valid_types = mstype.int_type + mstype.uint_type
+        validator.check_tensor_type_same(args, valid_types, prim)
+        return x1_type
+
+    def infer_dtype(self, x1_type, x2_type):
+        return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)
+
+
 class TensorAdd(_MathBinaryOp):
     """
     Adds two input tensors element-wise.
@@ -2186,3 +2207,63 @@ class SquareSumAll(PrimitiveWithInfer):
         validator.check_tensor_type_same({'x1_type': x_type}, [mstype.float16, mstype.float32], self.name)
         validator.check_tensor_type_same({'x2_type': y_type}, [mstype.float16, mstype.float32], self.name)
         return x_type, y_type
+
+
+class BitwiseAnd(_BitwiseBinaryOp):
+    """
+    Returns bitwise `and` of two input tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - The output tensor, with the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_and = P.BitwiseAnd()
+        >>> bitwise_and(input_x1, input_x2)
+        [0, 0, 1, -1, 1, 0, 1]
+    """
+
+
+class BitwiseOr(_BitwiseBinaryOp):
+    """
+    Returns bitwise `or` of two input tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - The output tensor, with the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_or = P.BitwiseOr()
+        >>> bitwise_or(input_x1, input_x2)
+        [0, 1, 1, -1, -1, 3, 3]
+    """
+
+
+class BitwiseXor(_BitwiseBinaryOp):
+    """
+    Returns bitwise `xor` of two input tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - The output tensor, with the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_xor = P.BitwiseXor()
+        >>> bitwise_xor(input_x1, input_x2)
+        [0, 1, 0, 0, -2, 3, 2]
+    """
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index b08480858..1d93e3c1b 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -242,6 +242,36 @@ class ApplyRMSNet(nn.Cell):
         return out
 
 test_case_math_ops = [
+    ('BitwiseAnd', {
+        'block': P.BitwiseAnd(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseAnd_1', {
+        'block': P.BitwiseAnd(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseOr', {
+        'block': P.BitwiseOr(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseOr_1', {
+        'block': P.BitwiseOr(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseXor', {
+        'block': P.BitwiseXor(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseXor_1', {
+        'block': P.BitwiseXor(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
     ('Neg', {
         'block': P.Neg(),
         'desc_inputs': [[1, 3, 4, 4]],
--
GitLab
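
Note (not part of the patch): for anyone who wants to exercise the new primitives end-to-end after applying this change, the following is a minimal usage sketch. It assumes an Ascend target (the only backend with the registered TBE kernels in this patch) and int16 inputs matching the registered dtype formats; the BitwiseNet cell and the script scaffolding are illustrative and do not appear in the patch.

import numpy as np

import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor, context
from mindspore.ops import operations as P


class BitwiseNet(nn.Cell):
    """Illustrative cell that applies the three new bitwise operators to a pair of tensors."""

    def __init__(self):
        super(BitwiseNet, self).__init__()
        self.bitwise_and = P.BitwiseAnd()
        self.bitwise_or = P.BitwiseOr()
        self.bitwise_xor = P.BitwiseXor()

    def construct(self, x1, x2):
        # The TBE registrations above only cover int16/uint16 inputs.
        return self.bitwise_and(x1, x2), self.bitwise_or(x1, x2), self.bitwise_xor(x1, x2)


if __name__ == "__main__":
    # Assumes an Ascend device is available; other targets would need their own kernels.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
    x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
    out_and, out_or, out_xor = BitwiseNet()(x1, x2)
    # Expected per the docstring examples:
    # [0 0 1 -1 1 0 1], [0 1 1 -1 -1 3 3], [0 1 0 0 -2 3 2]
    print(out_and, out_or, out_xor)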