Commit a36880f0 authored by lihongkang

Add VM support for reduce_all_d etc.

Parent 14f9a6e3
@@ -97,6 +97,9 @@ static std::map<string, string> tbe_func_adapter_map = {
{"cum_sum", "cumsum_d"},
{"apply_rms_prop", "apply_rms_prop_d"},
{"cum_prod", "cumprod_d"},
{"reduce_all", "reduce_all_d"},
{"sparse_apply_adagrad", "sparse_apply_adagrad_d"},
{"unsorted_segment_min", "unsorted_segment_min_d"},
{"reduce_prod", "reduce_prod_d"}};
void TbeAdapter::NormalizeFuncName(std::string *func_name) {
......
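Reviewer note: the entries added above extend `tbe_func_adapter_map`, which `TbeAdapter::NormalizeFuncName` consults when mapping a framework op name to its TBE kernel name; ops whose kernels ship as a `_d` variant get remapped here. A minimal Python sketch of that lookup, assuming (not confirmed from this hunk) that the C++ helper does a CamelCase-to-snake_case pass before the map lookup:

```python
import re

# Mirrors the map entries added in this commit.
tbe_func_adapter_map = {
    "reduce_all": "reduce_all_d",
    "sparse_apply_adagrad": "sparse_apply_adagrad_d",
    "unsorted_segment_min": "unsorted_segment_min_d",
    "reduce_prod": "reduce_prod_d",
}

def normalize_func_name(name: str) -> str:
    """CamelCase -> snake_case, then remap to the TBE kernel name if registered."""
    snake = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
    return tbe_func_adapter_map.get(snake, snake)

assert normalize_func_name("ReduceAll") == "reduce_all_d"
assert normalize_func_name("UnsortedSegmentMin") == "unsorted_segment_min_d"
```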
@@ -42,8 +42,9 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
Register(prim::kPrimConcat->name(), {0});
Register(prim::kPrimCumSum->name(), {1});
Register(prim::kPrimCumProd->name(), {1});
Register(prim::kPrimReduceAll->name(), {1});
Register(prim::kPrimUnsortedSegmentMin->name(), {2});
Register(kUnsortedSegmentProdOpName, {2});
Register(kUnsortedSegmentMinOpName, {2});
Register(kSimpleMeanGradOpName, {1});
Register(kMeanGradOpName, {1});
Register(kSliceOpName, {1, 2});
......
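For context: each `Register(op, {i, ...})` call above records which constant inputs of an op get folded into node attributes before kernel selection, which is what lets a `_d` kernel treat e.g. `axis` (input 1 of ReduceAll) or `num_segments` (input 2 of UnsortedSegmentMin) as a compile-time attribute. A rough Python sketch of the idea; the names are illustrative, not the real C++ API:

```python
# Illustrative only: constant-input indices that become attributes.
const_input_to_attr = {
    "ReduceAll": {1},           # axis
    "UnsortedSegmentMin": {2},  # num_segments
    "CumSum": {1},
}

def fold_const_inputs(op_name, inputs):
    """inputs: list of (value, is_const). Returns runtime inputs and folded attrs."""
    attr_idx = const_input_to_attr.get(op_name, set())
    runtime = [v for i, (v, _) in enumerate(inputs) if i not in attr_idx]
    attrs = {i: v for i, (v, is_const) in enumerate(inputs)
             if i in attr_idx and is_const}
    return runtime, attrs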
@@ -205,3 +205,6 @@ from .space_to_batch_nd import _space_to_batch_nd_tbe
from .bitwise_and import bitwise_and_op_info
from .bitwise_or import bitwise_or_op_info
from .bitwise_xor import bitwise_xor_op_info
from .reduce_all import _reduce_all_tbe
from .sparse_apply_adagrad import _sparse_apply_adagrad_tbe
from .unsorted_segment_min import _unsorted_segment_min_tbe
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ReduceAll op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
reduce_all_op_info = TBERegOp("ReduceAll") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("reduce_all_d.so") \
.compute_cost(10) \
.kernel_name("reduce_all_d") \
.partial_flag(True) \
.attr("axis", "required", "listInt", "all") \
.attr("keep_dims", "optional", "bool", "all") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
.dtype_format(DataType.BOOL_FracZ, DataType.BOOL_FracZ) \
.get_op_info()
@op_info_register(reduce_all_op_info)
def _reduce_all_tbe():
"""ReduceAll TBE register"""
return
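Usage of the op this registration backs, mirroring the ReduceAll test case added below (a sketch; it needs a backend where the TBE kernel is available):

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.array([[True, False], [True, True]]))
reduce_all = P.ReduceAll(keep_dims=False)
y = reduce_all(x, 1)  # logical AND along axis 1 -> [False, True]
```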
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SparseApplyAdagrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
sparse_apply_adagrad_op_info = TBERegOp("SparseApplyAdagrad") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("sparse_apply_adagrad.so") \
.compute_cost(10) \
.kernel_name("sparse_apply_adagrad") \
.partial_flag(True) \
.attr("lr", "required", "float", "all") \
.attr("update_slots", "optional", "bool", "all") \
.attr("use_locking", "optional", "bool", "all") \
.input(0, "var", False, "required", "all") \
.input(1, "accum", False, "required", "all") \
.input(2, "grad", False, "required", "all") \
.input(3, "indices", False, "required", "all") \
.output(0, "var", False, "required", "all") \
.dtype_format(DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.I32_NCHW, DataType.F32_NCHW) \
.dtype_format(DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC, DataType.I32_NHWC, DataType.F32_NHWC) \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.I32_Default,
DataType.F32_Default) \
.get_op_info()
@op_info_register(sparse_apply_adagrad_op_info)
def _sparse_apply_adagrad_tbe():
"""SparseApplyAdagrad TBE register"""
return
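The kernel applies the sparse Adagrad update to the rows named by `indices`: for each row idx, accum[idx] += grad^2 and var[idx] -= lr * grad / sqrt(accum[idx]). A sketch of calling it, following the test case added below; in real training `var` and `accum` would normally be `Parameter`s inside a `Cell`:

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

var = Tensor(np.array([[0.7, 0.2], [0.1, 0.07]], np.float32))
accum = Tensor(np.array([[0.2, 0.2], [0.1, 0.4]], np.float32))
grad = Tensor(np.array([[0.5, 0.4], [0.6, 0.1]], np.float32))
indices = Tensor(np.array([1, 1], np.int32))  # both grad rows update var row 1

sparse_adagrad = P.SparseApplyAdagrad(lr=0.5)
var_out = sparse_adagrad(var, accum, grad, indices)
```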
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""UnsortedSegmentMin op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
unsorted_segment_min_op_info = TBERegOp("UnsortedSegmentMin") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("unsorted_segment_min_d.so") \
.compute_cost(10) \
.kernel_name("unsorted_segment_min_d") \
.partial_flag(True) \
.attr("num_segments", "required", "int", "all") \
.input(0, "data", False, "required", "all") \
.input(1, "segment_ids", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_5HD, DataType.I32_Default, DataType.F16_5HD) \
.dtype_format(DataType.F16_FracZ, DataType.I32_Default, DataType.F16_FracZ) \
.dtype_format(DataType.F16_C1HWNCoC0, DataType.I32_Default, DataType.F16_C1HWNCoC0) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_5HD, DataType.I32_Default, DataType.F32_5HD) \
.dtype_format(DataType.F32_FracZ, DataType.I32_Default, DataType.F32_FracZ) \
.dtype_format(DataType.F32_C1HWNCoC0, DataType.I32_Default, DataType.F32_C1HWNCoC0) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_5HD, DataType.I32_Default, DataType.I32_5HD) \
.dtype_format(DataType.I32_FracZ, DataType.I32_Default, DataType.I32_FracZ) \
.dtype_format(DataType.I32_C1HWNCoC0, DataType.I32_Default, DataType.I32_C1HWNCoC0) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.get_op_info()
@op_info_register(unsorted_segment_min_op_info)
def _unsorted_segment_min_tbe():
"""UnsortedSegmentMin TBE register"""
return
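And the corresponding op usage, matching the UnsortedSegmentMin_1 test case below. Note that `num_segments` is passed as a call argument, which the const-input-to-attr registration above folds into the `_d` kernel's attribute:

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]], np.float32))
segment_ids = Tensor(np.array([0, 1, 1], np.int32))

unsorted_segment_min = P.UnsortedSegmentMin()
out = unsorted_segment_min(data, segment_ids, 2)
# segment 0: row 0 alone        -> [1, 2, 3]
# segment 1: min of rows 1 and 2 -> [4, 2, 1]
```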
@@ -651,6 +651,11 @@ test_case_math_ops = [
'block': P.Cos(),
'desc_inputs': [[2, 3]],
'desc_bprop': [[2, 3]]}),
('ReduceAll', {
'block': P.ReduceAll(),
'desc_const': [1],
'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
'desc_bprop': []}),
]
test_case_nn_ops = [
@@ -1059,6 +1064,13 @@ test_case_nn_ops = [
Tensor([[-1.4, -0.7], [0.9, 0.7]], mstype.float16)],
'desc_bprop': [],
'skip': ['backward']}),
('SparseApplyAdagrad', {
'block': P.SparseApplyAdagrad(0.5),
'desc_inputs': [Tensor([[0.7, 0.2], [0.1, 0.07]], mstype.float32),
Tensor([[0.2, 0.2], [0.1, 0.4]], mstype.float32),
Tensor([[0.5, 0.4], [0.6, 0.1]], mstype.float32), Tensor([1, 1], mstype.int32)],
'desc_bprop': [Tensor([[0.7, 0.2], [0.1, 0.07]], mstype.float32)],
'skip': ['backward']}),
]
test_case_array_ops = [
@@ -1266,6 +1278,12 @@ test_case_array_ops = [
'desc_inputs': [[4, 3, 1, 1]],
'desc_bprop': [[1, 3, 2, 1]],
}),
('UnsortedSegmentMin_1', {
'block': P.UnsortedSegmentMin(),
'desc_const': [2],
'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32)),
Tensor(np.array([0, 1, 1]).astype(np.int32))],
'desc_bprop': [Tensor(np.array([[1, 2, 3], [4, 2, 1]]).astype(np.float32))]}),
]
test_case_other_ops = [
......
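A quick NumPy cross-check of the expected output used in the UnsortedSegmentMin_1 entry (hand-computed here, not part of the test harness):

```python
import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]], np.float32)
seg = np.array([0, 1, 1])
out = np.stack([data[seg == i].min(axis=0) for i in range(2)])
# -> [[1, 2, 3], [4, 2, 1]], matching the desc_bprop tensor in the test case
```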