Unverified commit 70cee22f, authored by cnn, committed by GitHub

New features, add sinh and cosh op, test=develop (#25495)

* New features, add sinh and cosh op, test=develop

* remove duplicate test function and remove out parameters, test=develop

* Add out parameters temporarily; remove later. test=develop

* remove out args, PR 25570, test=develop

* remove TestParameter, test=develop

* add test API for static and dygraph modes, test=develop

* add backward unittests for sinh and cosh, test=develop
Parent 2f95e663
@@ -250,6 +250,20 @@ $$out = sin(x)$$
)DOC";
UNUSED constexpr char SinhDoc[] = R"DOC(
Sinh Activation Operator.
$$out = sinh(x)$$
)DOC";
UNUSED constexpr char CoshDoc[] = R"DOC(
Cosh Activation Operator.
$$out = cosh(x)$$
)DOC";
UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.
@@ -642,6 +656,8 @@ REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Cos, CosDoc);
REGISTER_ACTIVATION_OP_MAKER(Sin, SinDoc);
REGISTER_ACTIVATION_OP_MAKER(Sinh, SinhDoc);
REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
...
@@ -584,6 +584,72 @@ struct SinFunctor : public BaseActivationFunctor<T> {
  }
};
template <typename T>
struct Sinh {
  HOSTDEVICE T operator()(const T& val) const { return sinh(val); }
};

template <>
struct Sinh<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(sinhf(static_cast<float>(val)));
  }
};

template <typename T>
struct Cosh {
  HOSTDEVICE T operator()(const T& val) const { return cosh(val); }
};

template <>
struct Cosh<platform::float16> {
  HOSTDEVICE platform::float16 operator()(const platform::float16& val) const {
    return platform::float16(coshf(static_cast<float>(val)));
  }
};
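The float16 specializations above avoid calling sinh/cosh directly on half-precision values: they promote to float, call sinhf/coshf, and cast the result back. The same promote-compute-demote pattern, sketched in NumPy purely for illustration (not part of the patch):

import numpy as np

x16 = np.array([0.1, 0.5, 1.0], dtype=np.float16)
# Mirror the C++ specialization: compute in float32, cast back to float16.
out16 = np.sinh(x16.astype(np.float32)).astype(np.float16)
assert out16.dtype == np.float16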
// sinh activation: out = sinh(x)
template <typename T>
struct SinhFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Sinh<T>());
  }
};

// cosh activation: out = cosh(x)
template <typename T>
struct CoshFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.unaryExpr(Cosh<T>());
  }
};

// sinh'(x) = cosh(x)
template <typename T>
struct SinhGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * x.unaryExpr(Cosh<T>());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};

// cosh'(x) = sinh(x)
template <typename T>
struct CoshGradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * x.unaryExpr(Sinh<T>());
  }

  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
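Both grad functors compute dx = dout * f'(x) and declare kDepX, i.e. the backward pass needs only the forward input X, not the output. A small finite-difference check of the derivative formulas, outside the patch:

import numpy as np

x = np.random.uniform(0.1, 1.0, 5)
eps = 1e-6
# Central differences should match sinh' = cosh and cosh' = sinh.
d_sinh = (np.sinh(x + eps) - np.sinh(x - eps)) / (2 * eps)
d_cosh = (np.cosh(x + eps) - np.cosh(x - eps)) / (2 * eps)
assert np.allclose(d_sinh, np.cosh(x), atol=1e-4)
assert np.allclose(d_cosh, np.sinh(x), atol=1e-4)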
template <typename T>
struct Acos {
  HOSTDEVICE T operator()(const T& val) const { return acos(val); }
@@ -1752,6 +1818,8 @@ class PowGradKernel
__macro(acos, Acos, AcosFunctor, AcosGradFunctor); \
__macro(sin, Sin, SinFunctor, SinGradFunctor); \
__macro(asin, Asin, AsinFunctor, AsinGradFunctor); \
__macro(sinh, Sinh, SinhFunctor, SinhGradFunctor); \
__macro(cosh, Cosh, CoshFunctor, CoshGradFunctor); \
__macro(round, Round, RoundFunctor, ZeroGradFunctor); \
__macro(reciprocal, Reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \
__macro(log, Log, LogFunctor, LogGradFunctor); \
...
@@ -132,6 +132,7 @@ from .tensor.math import asin #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import ceil #DEFINE_ALIAS
from .tensor.math import cos #DEFINE_ALIAS
from .tensor.math import cosh #DEFINE_ALIAS
from .tensor.math import cumsum #DEFINE_ALIAS
from .tensor.math import elementwise_add #DEFINE_ALIAS
from .tensor.math import elementwise_div #DEFINE_ALIAS
@@ -157,6 +158,7 @@ from .tensor.math import rsqrt #DEFINE_ALIAS
from .tensor.math import scale #DEFINE_ALIAS
from .tensor.math import sign #DEFINE_ALIAS
from .tensor.math import sin #DEFINE_ALIAS
from .tensor.math import sinh #DEFINE_ALIAS
from .tensor.math import sqrt #DEFINE_ALIAS
from .tensor.math import square #DEFINE_ALIAS
from .tensor.math import stanh #DEFINE_ALIAS
...
@@ -35,6 +35,8 @@ __activations_noattr__ = [
    'acos',
    'asin',
    'sin',
    'sinh',
    'cosh',
    'round',
    'reciprocal',
    'square',
...
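Adding 'sinh' and 'cosh' to __activations_noattr__ lets ops.py generate the corresponding fluid.layers functions from the operators' proto docs, so no hand-written Python wrappers are needed. A minimal dygraph usage sketch, assuming a build containing this commit:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.array([0.5, 1.0], dtype=np.float32))
    print(fluid.layers.sinh(x).numpy())  # approx. [0.5211, 1.1752]
    print(fluid.layers.cosh(x).numpy())  # approx. [1.1276, 1.5431]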
@@ -183,6 +183,148 @@ class TestAtan(TestActivation, TestParameter):
        self.assertEqual(z, z_expected)
class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        self.assertTrue(np.allclose(np_sinh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be a Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32 or float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # float16 input is supported.
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)
class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            self.assertTrue(np.allclose(z, z_expected))

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res = exe.run(fluid.default_main_program(),
                                  feed={"data_x": input_x},
                                  fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        self.assertTrue(np.allclose(np_cosh_res, expected_res))

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be a Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32 or float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # float16 input is supported.
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)

class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
@@ -1204,8 +1346,10 @@ create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
...
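create_test_act_fp16_class derives a float16 variant of each registered test class; the new TestSinh and TestCosh entries reuse that machinery, with a looser grad_atol for cosh matching the existing cos entry. A generic sketch of the class-factory pattern, with hypothetical names rather than the file's exact helper:

import numpy as np

def make_fp16_variant(parent, grad_check=True):
    # Hypothetical re-creation of the pattern behind create_test_act_fp16_class.
    class Fp16Case(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_grad(self):
            if grad_check:
                super(Fp16Case, self).test_check_grad()

    Fp16Case.__name__ = parent.__name__ + "Fp16"
    return Fp16Case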
@@ -105,6 +105,7 @@ from .math import asin #DEFINE_ALIAS
from .math import atan #DEFINE_ALIAS
from .math import ceil #DEFINE_ALIAS
from .math import cos #DEFINE_ALIAS
from .math import cosh #DEFINE_ALIAS
from .math import cumsum #DEFINE_ALIAS
from .math import elementwise_add #DEFINE_ALIAS
from .math import elementwise_div #DEFINE_ALIAS
@@ -130,6 +131,7 @@ from .math import rsqrt #DEFINE_ALIAS
from .math import scale #DEFINE_ALIAS
from .math import sign #DEFINE_ALIAS
from .math import sin #DEFINE_ALIAS
from .math import sinh #DEFINE_ALIAS
from .math import sqrt #DEFINE_ALIAS
from .math import square #DEFINE_ALIAS
from .math import stanh #DEFINE_ALIAS
...
@@ -31,6 +31,8 @@ from ..fluid.layers import acos #DEFINE_ALIAS
from ..fluid.layers import asin #DEFINE_ALIAS
from ..fluid.layers import ceil #DEFINE_ALIAS
from ..fluid.layers import cos #DEFINE_ALIAS
from ..fluid.layers import sinh #DEFINE_ALIAS
from ..fluid.layers import cosh #DEFINE_ALIAS
from ..fluid.layers import cumsum #DEFINE_ALIAS
from ..fluid.layers import elementwise_add #DEFINE_ALIAS
from ..fluid.layers import elementwise_div #DEFINE_ALIAS
@@ -69,6 +71,7 @@ __all__ = [
    'atan',
    'ceil',
    'cos',
    'cosh',
    'cumsum',
    'elementwise_add',
    'elementwise_div',
@@ -95,6 +98,7 @@ __all__ = [
    'scale',
    'sign',
    'sin',
    'sinh',
    'sqrt',
    'square',
    'stanh',
...
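With the aliases above in place, the same kernels are reachable as paddle.sinh and paddle.cosh. A minimal usage sketch, assuming a build containing this commit:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.array([0.1], dtype=np.float32))
    print(paddle.sinh(x).numpy())  # approx. [0.10017]
    print(paddle.cosh(x).numpy())  # approx. [1.00500]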