Commit 95cbbd77 authored by: S sweetsky0901

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into my_maxout_op

@@ -212,6 +212,37 @@ Error __must_check backward(Argument& act) {
}
END_DEFINE_ACTIVATION(sequence_softmax)
/*
 * @brief SoftSign Activation.
 * \f[
 * f(z) = \frac{z}{1 + |z|}
 * \f]
 */
BEGIN_DEFINE_ACTIVATION(softsign)
private:
  MatrixPtr denominator_;

  Error __must_check forward(Argument& act) {
    size_t height = act.value->getHeight();
    size_t width = act.value->getWidth();
    Matrix::resizeOrCreate(
        denominator_, height, width, false, useGpu(act.deviceId));
    // denominator_ = 1 + |z|
    denominator_->assign(*act.value);
    denominator_->abs2();
    denominator_->add(1.);

    // y = z / (1 + |z|)
    act.value->dotDiv(*act.value, *denominator_);
    return Error();
  }

  Error __must_check backward(Argument& act) {
    // denominator_ still holds 1 + |z| from forward(); dy/dz = 1 / (1 + |z|)^2
    denominator_->square2();
    denominator_->scalarDiv(*denominator_, 1.);
    act.grad->dotMul(*act.grad, *denominator_);
    return Error();
  }
END_DEFINE_ACTIVATION(softsign)
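As a quick numeric sanity check of the activation added above, here is a small NumPy sketch (not part of the patch; the function names are made up) that evaluates the softsign forward value z / (1 + |z|) and the gradient factor 1 / (1 + |z|)^2 that the backward pass multiplies into act.grad:

```python
import numpy as np

def softsign_forward(z):
    # y = z / (1 + |z|), mirroring the dotDiv in forward()
    return z / (1.0 + np.abs(z))

def softsign_backward(z, grad_out):
    # dy/dz = 1 / (1 + |z|)^2, mirroring square2/scalarDiv/dotMul in backward()
    return grad_out / np.square(1.0 + np.abs(z))

z = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(softsign_forward(z))                    # approx [-0.667 -0.333  0.  0.333  0.667]
print(softsign_backward(z, np.ones_like(z)))  # approx [ 0.111  0.444  1.  0.444  0.111]
```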
/**
 * @brief Relu Activation.
 * forward. y = max(0, z)
...
@@ -17,7 +17,8 @@ __all__ = [
    "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
    'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
    "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
-   "LogActivation", "SqrtActivation", "ReciprocalActivation"
+   "LogActivation", "SqrtActivation", "ReciprocalActivation",
+   "SoftSignActivation"
]
@@ -243,8 +244,20 @@ class ReciprocalActivation(BaseActivation):
    Reciprocal Activation.

    .. math::
-      f(z) = 1/z
+      f(z)=\\frac{1}{z}
    """

    def __init__(self):
        BaseActivation.__init__(self, 'reciprocal', False)


class SoftSignActivation(BaseActivation):
    """
    SoftSign Activation.

    .. math::
       f(z)=\\frac{z}{1 + |z|}
    """

    def __init__(self):
        BaseActivation.__init__(self, 'softsign', False)
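For context, a hedged usage sketch (not part of this commit) of how the new SoftSignActivation could be attached to a layer via the legacy trainer-config helpers; the layer names and sizes are illustrative only:

```python
from paddle.trainer_config_helpers import *

# Illustrative config snippet (names and sizes are made up).
data = data_layer(name='input', size=784)
hidden = fc_layer(input=data, size=128, act=SoftSignActivation())
```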
@@ -3,6 +3,7 @@ import numpy as np
import sys
from op_test import OpTest


class TestSequenceSliceOp(OpTest):
    def set_data(self):
        self.init_test_case()
@@ -13,12 +14,12 @@ class TestSequenceSliceOp(OpTest):
        length = np.array(self.length).astype("int64")

        self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
        outs = [] #np.zeros((100, 3, 2)).astype('float32')
        out_lod = [[0]]
        out_lod_offset = 0
        for i in range(len(offset)):
-           sub_x = x[lod[0][i] + offset[i, 0]: lod[0]
-                     [i] + offset[i, 0] + length[i, 0], :]
+           sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] +
+                     length[i, 0], :]
            out_lod_offset = out_lod_offset + len(sub_x)
            outs.append(sub_x)
            out_lod[0].append(out_lod_offset)
@@ -41,5 +42,6 @@ class TestSequenceSliceOp(OpTest):
    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == '__main__':
    unittest.main()
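The loop in set_data above builds the reference output by slicing each LoD sequence at a per-sequence offset for a per-sequence length. A small standalone NumPy sketch of the same logic, with made-up data (not part of the test):

```python
import numpy as np

# Two sequences packed into one tensor, LoD boundaries [0, 3, 7].
x = np.arange(14, dtype='float32').reshape(7, 2)
lod = [[0, 3, 7]]
offset = np.array([[1], [2]], dtype='int64')  # start offset inside each sequence
length = np.array([[2], [1]], dtype='int64')  # slice length for each sequence

outs = []
for i in range(len(offset)):
    start = lod[0][i] + offset[i, 0]
    outs.append(x[start:start + length[i, 0], :])

out = np.concatenate(outs, axis=0)
print(out)  # rows 1-2 (from sequence 0) followed by row 5 (row 2 of sequence 1)
```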