diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp
index 8b7b2e9b65898950e036ebc023cd28990cef303f..f5a41b66bf09a4abc5ae7b64f227ca52461408f5 100644
--- a/paddle/gserver/activations/ActivationFunction.cpp
+++ b/paddle/gserver/activations/ActivationFunction.cpp
@@ -212,6 +212,37 @@ Error __must_check backward(Argument& act) {
 }
 END_DEFINE_ACTIVATION(sequence_softmax)
 
+/*
+ * @brief SoftSign Activation.
+ * \f[
+ * f(z) = \frac{z}{1 + |z|}
+ * \f]
+ */
+BEGIN_DEFINE_ACTIVATION(softsign)
+private:
+MatrixPtr denominator_;
+
+Error __must_check forward(Argument& act) {
+  size_t height = act.value->getHeight();
+  size_t width = act.value->getWidth();
+  Matrix::resizeOrCreate(
+      denominator_, height, width, false, useGpu(act.deviceId));
+  denominator_->assign(*act.value);
+  denominator_->abs2();
+  denominator_->add(1.);
+
+  act.value->dotDiv(*act.value, *denominator_);
+  return Error();
+}
+
+Error __must_check backward(Argument& act) {
+  denominator_->square2();
+  denominator_->scalarDiv(*denominator_, 1.);
+  act.grad->dotMul(*act.grad, *denominator_);
+  return Error();
+}
+END_DEFINE_ACTIVATION(softsign)
+
 /**
  * @brief Relu Activation.
  * forward. y = max(0, z)
diff --git a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py
index c749fa827fea4a808ab715dcb3442aa24d06a4d2..00efc01c0592107314f5b23c951706d039d49a88 100644
--- a/python/paddle/trainer_config_helpers/activations.py
+++ b/python/paddle/trainer_config_helpers/activations.py
@@ -17,7 +17,8 @@ __all__ = [
     "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
     'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
     "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
-    "LogActivation", "SqrtActivation", "ReciprocalActivation"
+    "LogActivation", "SqrtActivation", "ReciprocalActivation",
+    "SoftSignActivation"
 ]
 
 
@@ -243,8 +244,20 @@ class ReciprocalActivation(BaseActivation):
     Reciprocal Activation.
 
     .. math::
-       f(z) = 1/z
+       f(z)=\\frac{1}{z}
     """
 
     def __init__(self):
         BaseActivation.__init__(self, 'reciprocal', False)
+
+
+class SoftSignActivation(BaseActivation):
+    """
+    SoftSign Activation.
+
+    .. math::
+       f(z)=\\frac{z}{1 + |z|}
+    """
+
+    def __init__(self):
+        BaseActivation.__init__(self, 'softsign', False)
diff --git a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
old mode 100755
new mode 100644
index 4351d8e6d77c16e0012f9ae163b118fdbb793a8f..ccd9a05343b0c4aa05b258959665c0662f271512
--- a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
@@ -3,6 +3,7 @@ import numpy as np
 import sys
 from op_test import OpTest
 
+
 class TestSequenceSliceOp(OpTest):
     def set_data(self):
         self.init_test_case()
@@ -13,12 +14,12 @@ class TestSequenceSliceOp(OpTest):
         length = np.array(self.length).astype("int64")
 
         self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
-        outs = [] #np.zeros((100, 3, 2)).astype('float32')
+        outs = []  #np.zeros((100, 3, 2)).astype('float32')
         out_lod = [[0]]
         out_lod_offset = 0
         for i in range(len(offset)):
-            sub_x = x[lod[0][i] + offset[i, 0]: lod[0]
-                      [i] + offset[i, 0] + length[i, 0], :]
+            sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
+                      length[i, 0], :]
             out_lod_offset = out_lod_offset + len(sub_x)
             outs.append(sub_x)
             out_lod[0].append(out_lod_offset)
@@ -41,5 +42,6 @@ class TestSequenceSliceOp(OpTest):
     def test_check_grad(self):
         self.check_grad(['X'], 'Out')
 
+
 if __name__ == '__main__':
     unittest.main()
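For reference, the math implemented by the new C++ kernel can be checked with a few lines of numpy. This is an illustrative sketch only, not part of the patch or of any Paddle API; the helper names softsign_forward and softsign_backward are made up for the example. It mirrors forward(), which caches denominator_ = 1 + |z|, and backward(), which squares that cache, takes its reciprocal, and scales the incoming gradient, and it verifies the analytic derivative against a finite difference.

import numpy as np


def softsign_forward(z):
    # f(z) = z / (1 + |z|), matching the forward() pass above.
    return z / (1.0 + np.abs(z))


def softsign_backward(z, grad_out):
    # f'(z) = 1 / (1 + |z|)^2, matching the backward() pass, which squares the
    # saved denominator and divides 1 by it before multiplying the gradient.
    return grad_out / (1.0 + np.abs(z)) ** 2


if __name__ == '__main__':
    z = np.linspace(-3.0, 3.0, 7)
    eps = 1e-6
    # Central finite difference of the forward pass.
    numeric = (softsign_forward(z + eps) - softsign_forward(z - eps)) / (2 * eps)
    analytic = softsign_backward(z, np.ones_like(z))
    print(np.max(np.abs(numeric - analytic)))  # expected to be close to zero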