Commit 2cd9649b authored by mindspore-ci-bot, committed by Gitee

!2810 Add operator adaptation in ME for Softsign

Merge pull request !2810 from zhangzheng/softsign
......
@@ -336,6 +336,21 @@ def get_bprop_softplus(self):
    return bprop


@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
    """Grad definition for `Softsign` operation."""
    mul = P.Mul()
    absolute = P.Abs()
    div = P.Div()
    square = P.Square()

    def bprop(x, out, dout):
        dx = mul(dout, div(1, square(1 + absolute(x))))
        return (dx,)

    return bprop
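The closed form in `bprop` follows from differentiating Softsign: d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2, so the incoming gradient `dout` is scaled elementwise by that factor. A minimal NumPy sketch (not part of this commit; all names here are illustrative) checks the analytic gradient against a central finite difference:

import numpy as np

def softsign(x):
    return x / (1.0 + np.abs(x))

def softsign_grad(x):
    # Analytic gradient used by the bprop above: 1 / (1 + |x|)^2.
    return 1.0 / np.square(1.0 + np.abs(x))

# Avoid x == 0, where |x| is not differentiable.
x = np.array([0.5, -1.0, 2.0, -3.0])
eps = 1e-6
numeric = (softsign(x + eps) - softsign(x - eps)) / (2 * eps)
assert np.allclose(numeric, softsign_grad(x), atol=1e-5)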
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
    """Grad definition for `Tanh` operation."""
......
......
@@ -122,6 +122,7 @@ from .round import _round_tbe
from .tanh import _tanh_tbe
from .tanh_grad import _tanh_grad_tbe
from .softmax import _softmax_tbe
from .softsign import _softsign_tbe
from .softplus import _softplus_tbe
from .softplus_grad import _softplus_grad_tbe
from .softmax_grad_ext import _softmax_grad_ext_tbe
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Softsign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

softsign_op_info = TBERegOp("Softsign") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("softsign.so") \
    .compute_cost(10) \
    .kernel_name("softsign") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(softsign_op_info)
def _softsign_tbe():
    """Softsign TBE register"""
    return
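In short, this registration tells the TBE backend that `Softsign` maps to the prebuilt kernel `softsign` (from `softsign.so`), takes one required input `x` and produces one output `y`, is layout-agnostic (`formatAgnostic`), and supports float16 and float32 in the default format.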
......
@@ -68,7 +68,7 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                      MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
                      ResizeBilinear, Sigmoid,
                      SigmoidCrossEntropyWithLogits,
-                     SmoothL1Loss, Softmax, Softplus, LRN,
+                     SmoothL1Loss, Softmax, Softsign, Softplus, LRN,
                      SoftmaxCrossEntropyWithLogits, ROIAlign,
                      SparseSoftmaxCrossEntropyWithLogits, Tanh,
                      TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
......
@@ -115,6 +115,7 @@ __all__ = [
    'SparseApplyLazyAdam',
    'Softplus',
    'Softmax',
    'Softsign',
    'LogSoftmax',
    'SoftmaxCrossEntropyWithLogits',
    'ROIAlign',
......
......
@@ -224,6 +224,41 @@ class Softplus(PrimitiveWithInfer):
        return input_x


class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation function.

    The function is shown as follows:

    .. math::
        \text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.

    Outputs:
        Tensor, with the same type and shape as `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = P.Softsign()
        >>> softsign(input_x)
        [0. -0.5 0.6666667 0.9677419 -0.9677419]
    """

    @prim_attr_register
    def __init__(self):
        """init Softsign"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
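As a quick cross-check of the docstring example (a standalone NumPy sketch, not part of this commit), the printed values match x / (1 + |x|) computed directly:

import numpy as np

x = np.array([0, -1, 2, 30, -30], dtype=np.float32)
print(x / (1 + np.abs(x)))  # [ 0. -0.5  0.6666667  0.9677419 -0.9677419]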
class ReLU(PrimitiveWithInfer):
    r"""
    Computes ReLU (Rectified Linear Unit) of input tensor element-wise.
......
......
@@ -1376,6 +1376,10 @@ test_case_nn_ops = [
    ('Softmax', {
        'block': P.Softmax(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('Softsign', {
        'block': P.Softsign(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('DepthwiseConv2dNative_1', {
        'block': P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2),
        'desc_inputs': [[10, 32, 32, 32], [1, 32, 3, 3]],
......