From 218ade30b2d3883c6031e29d6e8b3af4c1ff2295 Mon Sep 17 00:00:00 2001 From: zhouneng Date: Wed, 20 May 2020 15:33:43 +0800 Subject: [PATCH] add vm support sin and cumsum --- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 3 +- mindspore/ccsrc/operator/ops.cc | 1 + mindspore/ccsrc/operator/ops.h | 1 + .../pass/const_input_to_attr_registry.cc | 2 +- mindspore/ops/_op_impl/tbe/__init__.py | 3 ++ mindspore/ops/_op_impl/tbe/cos.py | 37 ++++++++++++++++ mindspore/ops/_op_impl/tbe/cum_sum.py | 42 +++++++++++++++++++ mindspore/ops/_op_impl/tbe/sin.py | 37 ++++++++++++++++ tests/ut/python/ops/test_ops.py | 17 ++++---- 9 files changed, 131 insertions(+), 12 deletions(-) create mode 100644 mindspore/ops/_op_impl/tbe/cos.py create mode 100644 mindspore/ops/_op_impl/tbe/cum_sum.py create mode 100644 mindspore/ops/_op_impl/tbe/sin.py diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index 4489233db..dbe57f752 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -91,7 +91,8 @@ static std::map tbe_func_adapter_map = { {"s_gd", "sgd"}, {"l_ars_update", "lars_v2_update"}, {"n_ms_with_mask", "nms_with_mask"}, - {"square_sum_all", "square_sum_all"}}; + {"square_sum_all", "square_sum_all"}, + {"cum_sum", "cumsum_d"}}; void TbeAdapter::NormalizeFuncName(std::string *func_name) { if (func_name == nullptr) { diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc index da4e8983c..cdee85f05 100755 --- a/mindspore/ccsrc/operator/ops.cc +++ b/mindspore/ccsrc/operator/ops.cc @@ -166,6 +166,7 @@ const PrimitivePtr kPrimSquare = std::make_shared("Square"); const PrimitivePtr kPrimEqual = std::make_shared("Equal"); const PrimitivePtr kPrimLess = std::make_shared("Less"); const PrimitivePtr kPrimLessEqual = std::make_shared("LessEqual"); +const PrimitivePtr kPrimCumSum = std::make_shared("CumSum"); // NN const PrimitivePtr kPrimFlatten = std::make_shared("Flatten"); diff --git 
a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h index 8b63c876e..26f82d455 100755 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -172,6 +172,7 @@ extern const PrimitivePtr kPrimSquare; extern const PrimitivePtr kPrimEqual; extern const PrimitivePtr kPrimLess; extern const PrimitivePtr kPrimLessEqual; +extern const PrimitivePtr kPrimCumSum; // NN extern const PrimitivePtr kPrimFlatten; diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc index bb4a710d4..b5807b792 100644 --- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc +++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc @@ -40,6 +40,7 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() { Register(prim::kPrimUnsortedSegmentSum->name(), {2}); Register(prim::kPrimOneHot->name(), {1}); Register(prim::kPrimConcat->name(), {0}); + Register(prim::kPrimCumSum->name(), {1}); Register(kUnsortedSegmentProdOpName, {2}); Register(kUnsortedSegmentMinOpName, {2}); Register(kSimpleMeanGradOpName, {1}); @@ -60,7 +61,6 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() { Register(kResizeNearestNeighborV2OpName, {1}); Register(kResizeNearestNeighborV2GradOpName, {1}); Register(kApplyRMSPropOpname, {4, 5, 6}); - Register(kCumsumOpName, {1}); Register(kResizeBilinearV2OpName, {1}); Register(kReduceProdOpName, {1}); Register(kCumprodOpName, {1}); diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py index 99aff51b5..4da3edde2 100644 --- a/mindspore/ops/_op_impl/tbe/__init__.py +++ b/mindspore/ops/_op_impl/tbe/__init__.py @@ -191,3 +191,6 @@ from .prelu import _prelu_tbe from .prelu_grad import _prelu_grad_tbe from .binary_cross_entropy import _binary_cross_entropy_tbe from .binary_cross_entropy_grad import _binary_cross_entropy_grad_tbe +from .sin import _sin_tbe +from .cos import _cos_tbe 
+from .cum_sum import _cum_sum_tbe diff --git a/mindspore/ops/_op_impl/tbe/cos.py b/mindspore/ops/_op_impl/tbe/cos.py new file mode 100644 index 000000000..ecb106210 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/cos.py @@ -0,0 +1,37 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Cos op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +cos_op_info = TBERegOp("Cos") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("cos.so") \ + .compute_cost(10) \ + .kernel_name("cos") \ + .partial_flag(True) \ + .op_pattern("formatAgnostic") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() + + +@op_info_register(cos_op_info) +def _cos_tbe(): + """Cos TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/cum_sum.py b/mindspore/ops/_op_impl/tbe/cum_sum.py new file mode 100644 index 000000000..587a63074 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/cum_sum.py @@ -0,0 +1,42 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""CumSum op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +cum_sum_op_info = TBERegOp("CumSum") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("cumsum_d.so") \ + .compute_cost(10) \ + .kernel_name("cumsum_d") \ + .partial_flag(True) \ + .attr("axis", "optional", "int", "all", "0") \ + .attr("exclusive", "optional", "bool", "true,false", "false") \ + .attr("reverse", "optional", "bool", "true,false", "false") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .get_op_info() + + +@op_info_register(cum_sum_op_info) +def _cum_sum_tbe(): + """CumSum TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/sin.py b/mindspore/ops/_op_impl/tbe/sin.py new file mode 100644 index 000000000..187c0f0f3 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/sin.py @@ -0,0 +1,37 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Sin op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +sin_op_info = TBERegOp("Sin") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sin.so") \ + .compute_cost(10) \ + .kernel_name("sin") \ + .partial_flag(True) \ + .op_pattern("formatAgnostic") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() + + +@op_info_register(sin_op_info) +def _sin_tbe(): + """Sin TBE register""" + return diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index de884259c..43237efb0 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -504,10 +504,9 @@ test_case_math_ops = [ 'desc_inputs': [[4]], 'desc_bprop': [[4]]}), ('CumSum', { - 'block': P.CumSum(), - 'desc_const': [0], - 'desc_inputs': [Tensor(np.array([[3, 4], [1, 6]]).astype(np.float16))], - 'desc_bprop': [Tensor(np.array([[3, 4], [4, 10]]).astype(np.float16))]}), + 'block': CumSumNet(), + 'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))], + 'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}), ('ReduceSum_3', { 'block': P.ReduceSum(), 'desc_const': [0], @@ -579,6 +578,10 @@ test_case_math_ops = [ 'desc_inputs': [Tensor(np.array([0, 1, 
4, 5]).astype(np.float32)), Tensor(np.array([1, 1, 3, 7]).astype(np.float32))], 'skip': ['backward']}), + ('Cos', { + 'block': P.Cos(), + 'desc_inputs': [[2, 3]], + 'desc_bprop': [[2, 3]]}), ] test_case_nn_ops = [ @@ -885,12 +888,6 @@ test_case_nn_ops = [ 'desc_inputs': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))], 'desc_bprop': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))], 'skip': ['backward']}), - ('CumSumNet', { - 'block': CumSumNet(), - 'desc_const': [0], - 'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))], - 'desc_bprop': [ - Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))]}), ('OneHot', { 'block': P.OneHot(), 'desc_const': [3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)], -- GitLab