diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
index 4489233db77163f3d00854718f0535f389feecf4..dbe57f7523636ff2c3baccfc019b41b469aa016d 100644
--- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
+++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
@@ -91,7 +91,8 @@ static std::map<std::string, std::string> tbe_func_adapter_map = {
   {"s_gd", "sgd"},
   {"l_ars_update", "lars_v2_update"},
   {"n_ms_with_mask", "nms_with_mask"},
-  {"square_sum_all", "square_sum_all"}};
+  {"square_sum_all", "square_sum_all"},
+  {"cum_sum", "cumsum_d"}};
 
 void TbeAdapter::NormalizeFuncName(std::string *func_name) {
   if (func_name == nullptr) {
diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc
index da4e8983cb66c04c9b3ab778f58686c6aeb55856..cdee85f05f9b0f25c1c7d190c07c45db0107207c 100755
--- a/mindspore/ccsrc/operator/ops.cc
+++ b/mindspore/ccsrc/operator/ops.cc
@@ -166,6 +166,7 @@ const PrimitivePtr kPrimSquare = std::make_shared<Primitive>("Square");
 const PrimitivePtr kPrimEqual = std::make_shared<Primitive>("Equal");
 const PrimitivePtr kPrimLess = std::make_shared<Primitive>("Less");
 const PrimitivePtr kPrimLessEqual = std::make_shared<Primitive>("LessEqual");
+const PrimitivePtr kPrimCumSum = std::make_shared<Primitive>("CumSum");
 
 // NN
 const PrimitivePtr kPrimFlatten = std::make_shared<Primitive>("Flatten");
diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h
index 8b63c876edf73207313f8531ef8c15e7978c4e70..26f82d45597ded756fe9f4468e144a5ec14bd5f1 100755
--- a/mindspore/ccsrc/operator/ops.h
+++ b/mindspore/ccsrc/operator/ops.h
@@ -172,6 +172,7 @@ extern const PrimitivePtr kPrimSquare;
 extern const PrimitivePtr kPrimEqual;
 extern const PrimitivePtr kPrimLess;
 extern const PrimitivePtr kPrimLessEqual;
+extern const PrimitivePtr kPrimCumSum;
 
 // NN
 extern const PrimitivePtr kPrimFlatten;
diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
index bb4a710d479ce1d02f42e3c89a8f1c8667a0c269..b5807b792dfd25acf3974f4395210910c489ca18 100644
--- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
+++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc
@@ -40,6 +40,7 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
   Register(prim::kPrimUnsortedSegmentSum->name(), {2});
   Register(prim::kPrimOneHot->name(), {1});
   Register(prim::kPrimConcat->name(), {0});
+  Register(prim::kPrimCumSum->name(), {1});
   Register(kUnsortedSegmentProdOpName, {2});
   Register(kUnsortedSegmentMinOpName, {2});
   Register(kSimpleMeanGradOpName, {1});
@@ -60,7 +61,6 @@
   Register(kResizeNearestNeighborV2OpName, {1});
   Register(kResizeNearestNeighborV2GradOpName, {1});
   Register(kApplyRMSPropOpname, {4, 5, 6});
-  Register(kCumsumOpName, {1});
   Register(kResizeBilinearV2OpName, {1});
   Register(kReduceProdOpName, {1});
   Register(kCumprodOpName, {1});
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index 99aff51b5698b8b0f7f13995012f9f6ee07f90e6..4da3edde289373c46883845be5dbf9ce786a859b 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -191,3 +191,6 @@ from .prelu import _prelu_tbe
 from .prelu_grad import _prelu_grad_tbe
 from .binary_cross_entropy import _binary_cross_entropy_tbe
 from .binary_cross_entropy_grad import _binary_cross_entropy_grad_tbe
+from .sin import _sin_tbe
+from .cos import _cos_tbe
+from .cum_sum import _cum_sum_tbe
diff --git a/mindspore/ops/_op_impl/tbe/cos.py b/mindspore/ops/_op_impl/tbe/cos.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecb106210009764baa68cfb530b2cd30ec4bc020
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/cos.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Cos op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+cos_op_info = TBERegOp("Cos") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("cos.so") \
+    .compute_cost(10) \
+    .kernel_name("cos") \
+    .partial_flag(True) \
+    .op_pattern("formatAgnostic") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .get_op_info()
+
+
+@op_info_register(cos_op_info)
+def _cos_tbe():
+    """Cos TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/cum_sum.py b/mindspore/ops/_op_impl/tbe/cum_sum.py
new file mode 100644
index 0000000000000000000000000000000000000000..587a63074becb942eefc243a603ca114e75a44e4
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/cum_sum.py
@@ -0,0 +1,42 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""CumSum op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+cum_sum_op_info = TBERegOp("CumSum") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("cumsum_d.so") \
+    .compute_cost(10) \
+    .kernel_name("cumsum_d") \
+    .partial_flag(True) \
+    .attr("axis", "optional", "int", "all", "0") \
+    .attr("exclusive", "optional", "bool", "true,false", "false") \
+    .attr("reverse", "optional", "bool", "true,false", "false") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
+    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
+    .get_op_info()
+
+
+@op_info_register(cum_sum_op_info)
+def _cum_sum_tbe():
+    """CumSum TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/sin.py b/mindspore/ops/_op_impl/tbe/sin.py
new file mode 100644
index 0000000000000000000000000000000000000000..187c0f0f3217d6a4435b0ecd28902e8f12b293c3
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/sin.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Sin op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+sin_op_info = TBERegOp("Sin") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("sin.so") \
+    .compute_cost(10) \
+    .kernel_name("sin") \
+    .partial_flag(True) \
+    .op_pattern("formatAgnostic") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .get_op_info()
+
+
+@op_info_register(sin_op_info)
+def _sin_tbe():
+    """Sin TBE register"""
+    return
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index de884259c9c8fc3da1da461ec32b397460f0f79d..43237efb060914a229c991091204866df51e45bb 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -504,10 +504,9 @@ test_case_math_ops = [
         'desc_inputs': [[4]],
         'desc_bprop': [[4]]}),
     ('CumSum', {
-        'block': P.CumSum(),
-        'desc_const': [0],
-        'desc_inputs': [Tensor(np.array([[3, 4], [1, 6]]).astype(np.float16))],
-        'desc_bprop': [Tensor(np.array([[3, 4], [4, 10]]).astype(np.float16))]}),
+        'block': CumSumNet(),
+        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
+        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}),
     ('ReduceSum_3', {
         'block': P.ReduceSum(),
         'desc_const': [0],
@@ -579,6 +578,10 @@ test_case_math_ops = [
         'desc_inputs': [Tensor(np.array([0, 1, 4, 5]).astype(np.float32)),
                         Tensor(np.array([1, 1, 3, 7]).astype(np.float32))],
         'skip': ['backward']}),
+    ('Cos', {
+        'block': P.Cos(),
+        'desc_inputs': [[2, 3]],
+        'desc_bprop': [[2, 3]]}),
 ]
 
 test_case_nn_ops = [
@@ -885,12 +888,6 @@ test_case_nn_ops = [
         'desc_inputs': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
         'desc_bprop': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
         'skip': ['backward']}),
-    ('CumSumNet', {
-        'block': CumSumNet(),
-        'desc_const': [0],
-        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
-        'desc_bprop': [
-            Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))]}),
    ('OneHot', {
        'block': P.OneHot(),
        'desc_const': [3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)],
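
A note on the CumSum changes above: the ConstInputToAttr registration moves from the raw
kCumsumOpName string to the new kPrimCumSum primitive, so the constant axis operand (input
index 1) is folded into the axis attribute that the cumsum_d TBE kernel expects, alongside
the exclusive and reverse attributes. Those three attributes follow the usual
TensorFlow-style cumsum contract; a minimal NumPy sketch of the semantics they select
(the helper name is illustrative, not part of the patch):

import numpy as np

def cumsum_reference(x, axis=0, exclusive=False, reverse=False):
    """Reference cumsum matching the axis/exclusive/reverse attrs of cumsum_d."""
    if reverse:
        # Accumulate from the far end of the axis.
        x = np.flip(x, axis=axis)
    out = np.cumsum(x, axis=axis)
    if exclusive:
        # Each position excludes its own element from the running sum.
        out = out - x
    return np.flip(out, axis=axis) if reverse else out

x = np.array([[3, 4], [1, 6]], dtype=np.float32)
print(cumsum_reference(x, axis=0))                  # [[3. 4.] [4. 10.]], the old test's expected values
print(cumsum_reference(x, axis=0, exclusive=True))  # [[0. 0.] [3. 4.]]
print(cumsum_reference(x, axis=0, reverse=True))    # [[4. 10.] [1. 6.]]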
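In test_ops.py, the ('CumSum', ...) case now builds CumSumNet() and passes no const axis,
and the old standalone ('CumSumNet', ...) entry is dropped. CumSumNet itself is defined
earlier in test_ops.py and is not shown in this diff; a sketch of what such a wrapper
plausibly looks like, with the axis baked into the cell (the axis value here is an
assumption):

import mindspore.nn as nn
from mindspore.ops import operations as P

class CumSumNet(nn.Cell):
    """Wraps P.CumSum with a fixed axis so the test case needs no desc_const."""
    def __init__(self):
        super(CumSumNet, self).__init__()
        self.axis = 0  # assumed; the real definition chooses its own axis
        self.op = P.CumSum()

    def construct(self, x):
        # The axis is a graph-time constant, so the ConstInputToAttr pass
        # registered above can convert it into the cumsum_d 'axis' attribute.
        return self.op(x, self.axis)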
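Sin and Cos are registered as formatAgnostic ELEMWISE kernels over float16/float32 in the
5HD (NC1HWC0) layout, so they behave like the other unary element-wise TBE ops; the test
list only adds a Cos case, but both primitives are invoked the same way. A quick usage
sketch, assuming an Ascend target where these TBE kernels are selected:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.array([0.0, 0.5, 1.0], dtype=np.float32))
print(P.Sin()(x))  # element-wise sine, dispatched to the sin TBE kernel
print(P.Cos()(x))  # element-wise cosine, dispatched to the cos TBE kernel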