diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
index 34ee3753f4fde54179ed954f5f62c6d0e4e6d83f..d40d6d8b00ead033f1ecb6c500a31e16cc1eb651 100644
--- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
+++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
@@ -97,6 +97,9 @@ static std::map<std::string, std::string> tbe_func_adapter_map = {
   {"cum_sum", "cumsum_d"},
   {"apply_rms_prop", "apply_rms_prop_d"},
   {"cum_prod", "cumprod_d"},
+  {"reduce_all", "reduce_all_d"},
+  {"sparse_apply_adagrad", "sparse_apply_adagrad_d"},
+  {"unsorted_segment_min", "unsorted_segment_min_d"},
   {"reduce_prod", "reduce_prod_d"}};
 
 void TbeAdapter::NormalizeFuncName(std::string *func_name) {
diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
index 08bbb351377ad66c721d4726de33661507d0b8fc..b7471e38e019069204c659a840c7327fea589f79 100644
--- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
+++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
@@ -42,8 +42,9 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
   Register(prim::kPrimConcat->name(), {0});
   Register(prim::kPrimCumSum->name(), {1});
   Register(prim::kPrimCumProd->name(), {1});
+  Register(prim::kPrimReduceAll->name(), {1});
+  Register(prim::kPrimUnsortedSegmentMin->name(), {2});
   Register(kUnsortedSegmentProdOpName, {2});
-  Register(kUnsortedSegmentMinOpName, {2});
   Register(kSimpleMeanGradOpName, {1});
   Register(kMeanGradOpName, {1});
   Register(kSliceOpName, {1, 2});
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index 7fc9f4d296665dff439c5018bbe8841389f57628..cd90cc9f8f25ea9e2f7daf11ec866f8348d06661 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -205,3 +205,6 @@ from .space_to_batch_nd import _space_to_batch_nd_tbe
 from .bitwise_and import bitwise_and_op_info
 from .bitwise_or import bitwise_or_op_info
 from .bitwise_xor import bitwise_xor_op_info
+from .reduce_all import _reduce_all_tbe
+from .sparse_apply_adagrad import _sparse_apply_adagrad_tbe
+from .unsorted_segment_min import _unsorted_segment_min_tbe
diff --git a/mindspore/ops/_op_impl/tbe/reduce_all.py b/mindspore/ops/_op_impl/tbe/reduce_all.py
new file mode 100644
index 0000000000000000000000000000000000000000..a372dcf6b759f2715b93c0d51be2c30e0bc3e7ea
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/reduce_all.py
@@ -0,0 +1,38 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ReduceAll op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+reduce_all_op_info = TBERegOp("ReduceAll") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("reduce_all_d.so") \
+    .compute_cost(10) \
+    .kernel_name("reduce_all_d") \
+    .partial_flag(True) \
+    .attr("axis", "required", "listInt", "all") \
+    .attr("keep_dims", "optional", "bool", "all") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
+    .dtype_format(DataType.BOOL_FracZ, DataType.BOOL_FracZ) \
+    .get_op_info()
+
+
+@op_info_register(reduce_all_op_info)
+def _reduce_all_tbe():
+    """ReduceAll TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py b/mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca77a5eaed0e49fef43005f8f1df763209464227
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py
@@ -0,0 +1,44 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""SparseApplyAdagrad op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+sparse_apply_adagrad_op_info = TBERegOp("SparseApplyAdagrad") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("sparse_apply_adagrad.so") \
+    .compute_cost(10) \
+    .kernel_name("sparse_apply_adagrad") \
+    .partial_flag(True) \
+    .attr("lr", "required", "float", "all") \
+    .attr("update_slots", "optional", "bool", "all") \
+    .attr("use_locking", "optional", "bool", "all") \
+    .input(0, "var", False, "required", "all") \
+    .input(1, "accum", False, "required", "all") \
+    .input(2, "grad", False, "required", "all") \
+    .input(3, "indices", False, "required", "all") \
+    .output(0, "var", False, "required", "all") \
+    .dtype_format(DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.I32_NCHW, DataType.F32_NCHW) \
+    .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC, DataType.I32_NHWC, DataType.F32_NHWC) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.I32_Default,
+                  DataType.F32_Default) \
+    .get_op_info()
+
+
+@op_info_register(sparse_apply_adagrad_op_info)
+def _sparse_apply_adagrad_tbe():
+    """SparseApplyAdagrad TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/unsorted_segment_min.py b/mindspore/ops/_op_impl/tbe/unsorted_segment_min.py
new file mode 100644
index 0000000000000000000000000000000000000000..a26f14048a8e787082a152870ee75ff728b98eb9
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/unsorted_segment_min.py
@@ -0,0 +1,48 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""UnsortedSegmentMin op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+unsorted_segment_min_op_info = TBERegOp("UnsortedSegmentMin") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("unsorted_segment_min_d.so") \
+    .compute_cost(10) \
+    .kernel_name("unsorted_segment_min_d") \
+    .partial_flag(True) \
+    .attr("num_segments", "required", "int", "all") \
+    .input(0, "data", False, "required", "all") \
+    .input(1, "segment_ids", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.I32_Default, DataType.F16_5HD) \
+    .dtype_format(DataType.F16_FracZ, DataType.I32_Default, DataType.F16_FracZ) \
+    .dtype_format(DataType.F16_C1HWNCoC0, DataType.I32_Default, DataType.F16_C1HWNCoC0) \
+    .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_5HD, DataType.I32_Default, DataType.F32_5HD) \
+    .dtype_format(DataType.F32_FracZ, DataType.I32_Default, DataType.F32_FracZ) \
+    .dtype_format(DataType.F32_C1HWNCoC0, DataType.I32_Default, DataType.F32_C1HWNCoC0) \
+    .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.I32_5HD, DataType.I32_Default, DataType.I32_5HD) \
+    .dtype_format(DataType.I32_FracZ, DataType.I32_Default, DataType.I32_FracZ) \
+    .dtype_format(DataType.I32_C1HWNCoC0, DataType.I32_Default, DataType.I32_C1HWNCoC0) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
+    .get_op_info()
+
+
+@op_info_register(unsorted_segment_min_op_info)
+def _unsorted_segment_min_tbe():
+    """UnsortedSegmentMin TBE register"""
+    return
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 6a04f9e671f219257a1e164ebab1c9111c7225c7..842c632842c68cef4c1b9470657f41f2fd6bee8c 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -651,6 +651,11 @@ test_case_math_ops = [
         'block': P.Cos(),
         'desc_inputs': [[2, 3]],
         'desc_bprop': [[2, 3]]}),
+    ('ReduceAll', {
+        'block': P.ReduceAll(),
+        'desc_const': [1],
+        'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
+        'desc_bprop': []}),
 ]
 
 test_case_nn_ops = [
@@ -1059,6 +1064,13 @@ test_case_nn_ops = [
                         Tensor([[-1.4, -0.7], [0.9, 0.7]], mstype.float16)],
         'desc_bprop': [],
         'skip': ['backward']}),
+    ('SparseApplyAdagrad', {
+        'block': P.SparseApplyAdagrad(0.5),
+        'desc_inputs': [Tensor([[0.7, 0.2], [0.1, 0.07]], mstype.float32),
+                        Tensor([[0.2, 0.2], [0.1, 0.4]], mstype.float32),
+                        Tensor([[0.5, 0.4], [0.6, 0.1]], mstype.float32), Tensor([1, 1], mstype.int32)],
+        'desc_bprop': [Tensor([[0.7, 0.2], [0.1, 0.07]], mstype.float32)],
+        'skip': ['backward']}),
 ]
 
 test_case_array_ops = [
@@ -1266,6 +1278,12 @@ test_case_array_ops = [
         'desc_inputs': [[4, 3, 1, 1]],
         'desc_bprop': [[1, 3, 2, 1]],
     }),
+    ('UnsortedSegmentMin_1', {
+        'block': P.UnsortedSegmentMin(),
+        'desc_const': [2],
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32)),
+                        Tensor(np.array([0, 1, 1]).astype(np.int32))],
+        'desc_bprop': [Tensor(np.array([[1, 2, 3], [4, 2, 1]]).astype(np.float32))]}),
 ]
 
 test_case_other_ops = [
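
As a sanity check, here is a minimal usage sketch (not part of the patch) exercising the three ops this change wires up to TBE kernels. It assumes an Ascend device and PyNative mode; values and shapes mirror the new test cases in test_ops.py, and the commented results reflect the expected op semantics rather than captured output.

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

# Assumption: an Ascend backend is available, so these primitives dispatch to
# the TBE kernels registered above (reduce_all_d, unsorted_segment_min_d,
# sparse_apply_adagrad).
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

# ReduceAll: logical AND reduction along axis 1 of a bool tensor.
x = Tensor(np.array([[True, False], [True, True]]))
print(P.ReduceAll(keep_dims=False)(x, 1))  # expected: [False  True]

# UnsortedSegmentMin: elementwise minimum over the rows sharing a segment id,
# with num_segments = 2.
data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
print(P.UnsortedSegmentMin()(data, segment_ids, 2))  # expected: [[1 2 3] [4 2 1]]

# SparseApplyAdagrad: an Adagrad step restricted to the rows named by indices,
# roughly accum += grad * grad; var -= lr * grad / sqrt(accum).
var = Tensor(np.array([[0.7, 0.2], [0.1, 0.07]]).astype(np.float32))
accum = Tensor(np.array([[0.2, 0.2], [0.1, 0.4]]).astype(np.float32))
grad = Tensor(np.array([[0.5, 0.4], [0.6, 0.1]]).astype(np.float32))
indices = Tensor(np.array([1, 1]).astype(np.int32))
print(P.SparseApplyAdagrad(lr=0.5)(var, accum, grad, indices))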