diff --git a/paddle/fluid/operators/math/complex_functors.h b/paddle/fluid/operators/math/complex_functors.h
index 2e9e72eac12aaabe0ac658a2f1c8711267e75936..0e8aed40f6e16a6bd5395bdeadd49b80a132ae6f 100644
--- a/paddle/fluid/operators/math/complex_functors.h
+++ b/paddle/fluid/operators/math/complex_functors.h
@@ -144,7 +144,7 @@ struct AbsFunctor<T, NoComplex<T, Real<T>>> {
       : input_(input), output_(output), numel_(numel) {}
 
   HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = abs(input_[idx]);
+    output_[idx] = std::abs(input_[idx]);
   }
 
   const T* input_;
@@ -162,7 +162,7 @@ struct AbsGradFunctor {
     if (x_[idx] == T(0)) {
       output_[idx] = T(0);
     } else {
-      output_[idx] = T(dout_[idx]) * (x_[idx] / T(abs(x_[idx])));
+      output_[idx] = T(dout_[idx]) * (x_[idx] / T(std::abs(x_[idx])));
     }
   }
 
@@ -172,6 +172,48 @@ struct AbsGradFunctor {
   int64_t numel_;
 };
 
+template <>
+struct AbsGradFunctor<paddle::platform::complex64> {
+  AbsGradFunctor(const float* dout, const paddle::platform::complex64* x,
+                 paddle::platform::complex64* output, int64_t numel)
+      : dout_(dout), x_(x), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    if (x_[idx] == paddle::platform::complex64(0)) {
+      output_[idx] = paddle::platform::complex64(0);
+    } else {
+      output_[idx] = paddle::platform::complex64(dout_[idx]) *
+                     (x_[idx] / paddle::platform::complex64(abs(x_[idx])));
+    }
+  }
+
+  const float* dout_;
+  const paddle::platform::complex64* x_;
+  paddle::platform::complex64* output_;
+  int64_t numel_;
+};
+
+template <>
+struct AbsGradFunctor<paddle::platform::complex128> {
+  AbsGradFunctor(const double* dout, const paddle::platform::complex128* x,
+                 paddle::platform::complex128* output, int64_t numel)
+      : dout_(dout), x_(x), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    if (x_[idx] == paddle::platform::complex128(0)) {
+      output_[idx] = paddle::platform::complex128(0);
+    } else {
+      output_[idx] = paddle::platform::complex128(dout_[idx]) *
+                     (x_[idx] / paddle::platform::complex128(abs(x_[idx])));
+    }
+  }
+
+  const double* dout_;
+  const paddle::platform::complex128* x_;
+  paddle::platform::complex128* output_;
+  int64_t numel_;
+};
+
 template <typename T>
 struct AbsGradGradFunctor {
   AbsGradGradFunctor(const T* ddx, const T* x, T* output, int64_t numel)
@@ -181,7 +223,7 @@ struct AbsGradGradFunctor {
     if (x_[idx] == T(0)) {
      output_[idx] = T(0);
     } else {
-      output_[idx] = T(ddx_[idx]) * x_[idx] / T(abs(x_[idx]));
+      output_[idx] = T(ddx_[idx]) * x_[idx] / T(std::abs(x_[idx]));
     }
   }
 
@@ -191,6 +233,49 @@ struct AbsGradGradFunctor {
   int64_t numel_;
 };
 
+template <>
+struct AbsGradGradFunctor<paddle::platform::complex128> {
+  AbsGradGradFunctor(const paddle::platform::complex128* ddx,
+                     const paddle::platform::complex128* x,
+                     paddle::platform::complex128* output, int64_t numel)
+      : ddx_(ddx), x_(x), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    if (x_[idx] == paddle::platform::complex128(0)) {
+      output_[idx] = paddle::platform::complex128(0);
+    } else {
+      output_[idx] = paddle::platform::complex128(ddx_[idx]) * x_[idx] /
+                     paddle::platform::complex128(abs(x_[idx]));
+    }
+  }
+
+  const paddle::platform::complex128* ddx_;
+  const paddle::platform::complex128* x_;
+  paddle::platform::complex128* output_;
+  int64_t numel_;
+};
+
+template <>
+struct AbsGradGradFunctor<paddle::platform::complex64> {
+  AbsGradGradFunctor(const paddle::platform::complex64* ddx,
+                     const paddle::platform::complex64* x,
+                     paddle::platform::complex64* output, int64_t numel)
+      : ddx_(ddx), x_(x), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    if (x_[idx] == paddle::platform::complex64(0)) {
+      output_[idx] = paddle::platform::complex64(0);
+    } else {
+      output_[idx] = paddle::platform::complex64(ddx_[idx]) * x_[idx] /
+                     paddle::platform::complex64(abs(x_[idx]));
+    }
+  }
+
+  const paddle::platform::complex64* ddx_;
+  const paddle::platform::complex64* x_;
+  paddle::platform::complex64* output_;
+  int64_t numel_;
+};
 template <typename T>
 struct RealToComplexFunctor;
 
diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h
index 6f0b44f6af60298cca1a65445ef77ba6b1810396..d4b308e6bc54137259979ac3d85cec634f249c70 100644
--- a/paddle/fluid/platform/float16.h
+++ b/paddle/fluid/platform/float16.h
@@ -1013,6 +1013,11 @@ struct numeric_limits<paddle::platform::float16> {
   }
 };
 
+HOSTDEVICE inline paddle::platform::float16 abs(
+    const paddle::platform::float16& a) {
+  return paddle::platform::abs(a);
+}
+
 }  // namespace std
 
 namespace Eigen {
diff --git a/python/paddle/fluid/tests/unittests/test_complex_abs.py b/python/paddle/fluid/tests/unittests/test_complex_abs.py
index f9bce91e46d91057c3b879d32c43fac21b1482a2..d049eaaf506e594565a93de54ea8ff96fba55f30 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_abs.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_abs.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 
 import paddle
+import paddle.fluid.dygraph as dg
 from op_test import OpTest
 
 
@@ -85,5 +86,52 @@ class TestComplexAbsOpZeroValues(OpTest):
             user_defined_grad_outputs=[self.grad_out])
 
 
+class TestAbs(unittest.TestCase):
+    def setUp(self):
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
+        if paddle.is_compiled_with_cuda():
+            self._places.append(paddle.CUDAPlace(0))
+
+    def test_all_positive(self):
+        for dtype in self._dtypes:
+            x = 1 + 10 * np.random.random([13, 3, 3]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    y = paddle.abs(paddle.to_tensor(x))
+                    self.assertTrue(np.allclose(np.abs(x), y.numpy()))
+
+
+class TestRealAbsOp(OpTest):
+    def setUp(self):
+        paddle.enable_static()
+        self.op_type = "abs"
+        self.dtype = np.float64
+        self.shape = (2, 3, 4, 5)
+        self.init_input_output()
+        self.init_grad_input_output()
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)}
+        self.outputs = {'Out': self.out}
+
+    def init_input_output(self):
+        self.x = 1 + np.random.random(self.shape).astype(self.dtype)
+        self.out = np.abs(self.x)
+
+    def init_grad_input_output(self):
+        self.grad_out = np.ones(self.shape, self.dtype)
+        self.grad_x = self.grad_out * (self.x / np.abs(self.x))
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(
+            ['X'],
+            'Out',
+            user_defined_grads=[self.grad_x],
+            user_defined_grad_outputs=[self.grad_out])
+
+
 if __name__ == '__main__':
     unittest.main()
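
For reference, a minimal NumPy sketch (not part of the patch) of the gradient rule that the new AbsGradFunctor and AbsGradGradFunctor specializations implement, and that test_complex_abs.py already encodes as `grad_out * (x / np.abs(x))`: the upstream gradient of `|x|` is real-valued, the complex gradient is `g * x / |x|`, and it is defined as zero where `x == 0`.

    import numpy as np

    # Standalone check of the rule implemented by the complex64/complex128
    # specializations above: grad = g * x / |x|, with grad = 0 where x == 0.
    rng = np.random.default_rng(0)
    x = (rng.standard_normal((2, 3)) +
         1j * rng.standard_normal((2, 3))).astype(np.complex64)
    dout = np.ones(x.shape, dtype=np.float32)  # upstream grad of |x| is real

    grad_x = np.zeros_like(x)                  # complex output, as in the functor
    mask = x != 0
    grad_x[mask] = dout[mask] * (x[mask] / np.abs(x[mask]))

    # Matches the reference formula used in test_complex_abs.py.
    assert np.allclose(grad_x, dout * (x / np.abs(x)))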
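A note on the two spellings of abs in this patch: the generic functors now call the qualified std::abs, which resolves for float and double, and for paddle::platform::float16 via the overload this patch adds to namespace std in float16.h. Inside the complex64/complex128 specializations, abs is left unqualified, presumably so that argument-dependent lookup finds paddle::platform::abs, which returns the real magnitude of the complex value; the magnitude is then converted back to the complex type before the division.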