Unverified commit fb7fbc7a, authored by chentianyu03 and committed by GitHub

fix abs bug and add abs test case (#30637)

* add abs test case

* use std::abs to fix abs bug

* fix the abs bug

* fix abs bug
Parent 37926611
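The root cause: in these HOSTDEVICE functors, an unqualified call to abs
on a float or double input can resolve to the C library's integer ::abs,
which truncates the argument before taking the absolute value. A minimal
standalone sketch of the pitfall (not Paddle code; whether a
floating-point ::abs is also visible is implementation-dependent):

#include <cmath>    // std::abs overloads for floating-point types
#include <cstdlib>  // C's int abs(int)
#include <iostream>

int main() {
  double x = -2.7;
  // Unqualified abs may pick int abs(int): -2.7 is truncated to -2,
  // and the call returns 2 instead of 2.7.
  std::cout << abs(x) << "\n";       // may print 2 (implementation-dependent)
  std::cout << std::abs(x) << "\n";  // prints 2.7: the double overload wins
  return 0;
}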
@@ -144,7 +144,7 @@ struct AbsFunctor<T, NoComplex<T, Real<T>>> {
      : input_(input), output_(output), numel_(numel) {}
  HOSTDEVICE void operator()(int64_t idx) const {
-    output_[idx] = abs(input_[idx]);
+    output_[idx] = std::abs(input_[idx]);
  }
  const T* input_;
@@ -162,7 +162,7 @@ struct AbsGradFunctor {
    if (x_[idx] == T(0)) {
      output_[idx] = T(0);
    } else {
-      output_[idx] = T(dout_[idx]) * (x_[idx] / T(abs(x_[idx])));
+      output_[idx] = T(dout_[idx]) * (x_[idx] / T(std::abs(x_[idx])));
    }
  }
@@ -172,6 +172,48 @@ struct AbsGradFunctor {
  int64_t numel_;
};

+ template <>
+ struct AbsGradFunctor<paddle::platform::complex64> {
+   AbsGradFunctor(const float* dout, const paddle::platform::complex64* x,
+                  paddle::platform::complex64* output, int64_t numel)
+       : dout_(dout), x_(x), output_(output), numel_(numel) {}
+   HOSTDEVICE void operator()(int64_t idx) const {
+     if (x_[idx] == paddle::platform::complex64(0)) {
+       output_[idx] = paddle::platform::complex64(0);
+     } else {
+       output_[idx] = paddle::platform::complex64(dout_[idx]) *
+                      (x_[idx] / paddle::platform::complex64(abs(x_[idx])));
+     }
+   }
+
+   const float* dout_;
+   const paddle::platform::complex64* x_;
+   paddle::platform::complex64* output_;
+   int64_t numel_;
+ };
+
+ template <>
+ struct AbsGradFunctor<paddle::platform::complex128> {
+   AbsGradFunctor(const double* dout, const paddle::platform::complex128* x,
+                  paddle::platform::complex128* output, int64_t numel)
+       : dout_(dout), x_(x), output_(output), numel_(numel) {}
+   HOSTDEVICE void operator()(int64_t idx) const {
+     if (x_[idx] == paddle::platform::complex128(0)) {
+       output_[idx] = paddle::platform::complex128(0);
+     } else {
+       output_[idx] = paddle::platform::complex128(dout_[idx]) *
+                      (x_[idx] / paddle::platform::complex128(abs(x_[idx])));
+     }
+   }
+
+   const double* dout_;
+   const paddle::platform::complex128* x_;
+   paddle::platform::complex128* output_;
+   int64_t numel_;
+ };
template <typename T>
struct AbsGradGradFunctor {
  AbsGradGradFunctor(const T* ddx, const T* x, T* output, int64_t numel)
@@ -181,7 +223,7 @@ struct AbsGradGradFunctor {
    if (x_[idx] == T(0)) {
      output_[idx] = T(0);
    } else {
-      output_[idx] = T(ddx_[idx]) * x_[idx] / T(abs(x_[idx]));
+      output_[idx] = T(ddx_[idx]) * x_[idx] / T(std::abs(x_[idx]));
    }
  }
@@ -191,6 +233,49 @@ struct AbsGradGradFunctor {
  int64_t numel_;
};

+ template <>
+ struct AbsGradGradFunctor<paddle::platform::complex128> {
+   AbsGradGradFunctor(const paddle::platform::complex128* ddx,
+                      const paddle::platform::complex128* x,
+                      paddle::platform::complex128* output, int64_t numel)
+       : ddx_(ddx), x_(x), output_(output), numel_(numel) {}
+   HOSTDEVICE void operator()(int64_t idx) const {
+     if (x_[idx] == paddle::platform::complex128(0)) {
+       output_[idx] = paddle::platform::complex128(0);
+     } else {
+       output_[idx] = paddle::platform::complex128(ddx_[idx]) * x_[idx] /
+                      paddle::platform::complex128(abs(x_[idx]));
+     }
+   }
+
+   const paddle::platform::complex128* ddx_;
+   const paddle::platform::complex128* x_;
+   paddle::platform::complex128* output_;
+   int64_t numel_;
+ };
+
+ template <>
+ struct AbsGradGradFunctor<paddle::platform::complex64> {
+   AbsGradGradFunctor(const paddle::platform::complex64* ddx,
+                      const paddle::platform::complex64* x,
+                      paddle::platform::complex64* output, int64_t numel)
+       : ddx_(ddx), x_(x), output_(output), numel_(numel) {}
+   HOSTDEVICE void operator()(int64_t idx) const {
+     if (x_[idx] == paddle::platform::complex64(0)) {
+       output_[idx] = paddle::platform::complex64(0);
+     } else {
+       output_[idx] = paddle::platform::complex64(ddx_[idx]) * x_[idx] /
+                      paddle::platform::complex64(abs(x_[idx]));
+     }
+   }
+
+   const paddle::platform::complex64* ddx_;
+   const paddle::platform::complex64* x_;
+   paddle::platform::complex64* output_;
+   int64_t numel_;
+ };
template <typename T, typename Enable = void>
struct RealToComplexFunctor;
......
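For the complex specializations above, the backward rule matches the
generic functor: grad_x = dout * x / |x|, defined as 0 at x == 0 where
|x| is not differentiable. Note they can keep calling unqualified
abs(x_[idx]) because argument-dependent lookup finds Paddle's abs for
its complex types. A standalone sketch of the same rule using
std::complex (illustrative only, not paddle::platform::complex64):

#include <complex>
#include <iostream>

// Subgradient of |x| for complex x: dout * (x / |x|), 0 at the origin.
std::complex<float> abs_grad(float dout, std::complex<float> x) {
  if (x == std::complex<float>(0)) return {0.0f, 0.0f};
  return std::complex<float>(dout) * (x / std::complex<float>(std::abs(x)));
}

int main() {
  std::complex<float> x(3.0f, 4.0f);  // |x| = 5
  std::complex<float> g = abs_grad(1.0f, x);
  std::cout << g.real() << ", " << g.imag() << "\n";  // 0.6, 0.8
  return 0;
}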
@@ -1013,6 +1013,11 @@ struct numeric_limits<paddle::platform::float16> {
  }
};

+ HOSTDEVICE inline paddle::platform::float16 abs(
+     const paddle::platform::float16& a) {
+   return paddle::platform::abs(a);
+ }
} // namespace std
namespace Eigen {
......
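The std namespace addition above is what keeps the std::abs rewrite
compiling for half precision: float and double already have std::abs
overloads, paddle::platform::float16 does not, so a forwarding overload
is provided. A minimal sketch of the same pattern with a hypothetical
Half type (strictly, adding overloads to namespace std is not sanctioned
by the C++ standard; this mirrors the pragmatic approach used here):

#include <cmath>
#include <iostream>

// Hypothetical half-like wrapper standing in for paddle::platform::float16.
struct Half {
  float value;
};

namespace std {
// Forwarding overload so templated code calling std::abs(T) instantiates
// for Half as well as for the built-in floating-point types.
inline Half abs(const Half& a) { return Half{std::fabs(a.value)}; }
}  // namespace std

template <typename T>
T call_abs(const T& v) {
  return std::abs(v);  // resolves for float, double, and Half alike
}

int main() {
  std::cout << call_abs(Half{-1.5f}).value << "\n";  // 1.5
  std::cout << call_abs(-2.0) << "\n";               // 2
  return 0;
}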
@@ -18,6 +18,7 @@ import unittest
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
+ from op_test import OpTest
@@ -85,5 +86,52 @@ class TestComplexAbsOpZeroValues(OpTest):
            user_defined_grad_outputs=[self.grad_out])

+ class TestAbs(unittest.TestCase):
+     def setUp(self):
+         self._dtypes = ["float32", "float64"]
+         self._places = [paddle.CPUPlace()]
+         if paddle.is_compiled_with_cuda():
+             self._places.append(paddle.CUDAPlace(0))
+
+     def test_all_positive(self):
+         for dtype in self._dtypes:
+             x = 1 + 10 * np.random.random([13, 3, 3]).astype(dtype)
+             for place in self._places:
+                 with dg.guard(place):
+                     y = paddle.abs(paddle.to_tensor(x))
+                     self.assertTrue(np.allclose(np.abs(x), y.numpy()))
+
+ class TestRealAbsOp(OpTest):
+     def setUp(self):
+         paddle.enable_static()
+         self.op_type = "abs"
+         self.dtype = np.float64
+         self.shape = (2, 3, 4, 5)
+         self.init_input_output()
+         self.init_grad_input_output()
+
+         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)}
+         self.outputs = {'Out': self.out}
+
+     def init_input_output(self):
+         self.x = 1 + np.random.random(self.shape).astype(self.dtype)
+         self.out = np.abs(self.x)
+
+     def init_grad_input_output(self):
+         self.grad_out = np.ones(self.shape, self.dtype)
+         self.grad_x = self.grad_out * (self.x / np.abs(self.x))
+
+     def test_check_output(self):
+         self.check_output()
+
+     def test_check_grad(self):
+         self.check_grad(
+             ['X'],
+             'Out',
+             user_defined_grads=[self.grad_x],
+             user_defined_grad_outputs=[self.grad_out])

if __name__ == '__main__':
    unittest.main()
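The tests pin the analytic gradient grad_out * x / |x| (inputs are
shifted to be strictly positive, so the formula is well-defined). A
quick finite-difference sanity check of that formula, as a standalone
sketch:

#include <cmath>
#include <cstdio>

// Compare d|x|/dx from central differences against x / |x| away from 0,
// the same expression used for user_defined_grads in the tests above.
int main() {
  const double eps = 1e-6;
  for (double x : {-2.5, -0.3, 0.7, 4.0}) {
    double numeric = (std::abs(x + eps) - std::abs(x - eps)) / (2 * eps);
    double analytic = x / std::abs(x);
    std::printf("x=%5.2f numeric=%.6f analytic=%.6f\n", x, numeric, analytic);
  }
  return 0;
}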