diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index b53816613a73c911bcf63f9782270208bf047a1f..129f70428a003a53e4516ddab392a656d3a542b4 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -284,6 +284,15 @@ void sqrt_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
   }
 }
 
+template <typename T>
+void floor_grad(const Tensor& out_grad, Tensor* x_grad) {
+  if (x_grad) {
+    auto zero_tensor =
+        full<T>(phi::vectorize(out_grad.dims()), 0.0, out_grad.dtype());
+    set_output<T>(zero_tensor, x_grad);
+  }
+}
+
 template <typename T>
 void concat_grad(const std::vector<Tensor>& x,
                  const Tensor& out_grad,
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index 52fa467f3d1e931d197c7af706876d4a84789063..fde5deeafe9fb40d4ff26da79e777416d4d4449f 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -533,6 +533,7 @@
     param: [out_grad]
   kernel :
     func : floor_grad
+  composite : floor_grad(out_grad, x_grad)
   inplace : (out_grad -> x_grad)
 
 - backward_op : fold_grad
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index a11d8fae5b4cde9a863e84fbc6876eba223e0437..d1b821315f39905ea592c97eeb96b9e37d4bb5b7 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -1434,6 +1434,49 @@ class TestFloor_ZeroDim(TestFloor):
         self.shape = []
 
 
+class TestFloorPrim(TestActivation):
+    def setUp(self):
+        self.op_type = "floor"
+        self.prim_op_type = "prim"
+
+        # the gradient of floor, ceil and round is undefined; we
+        # return zero as the gradient, while numpy returns nan.
+        # for prim, we compare the result with the eager python api,
+        # so we use the only_prim flag to run only the prim test.
+        self.only_prim = True
+        self.check_eager = True
+        self.python_api = paddle.floor
+        self.init_dtype()
+        self.init_shape()
+
+        if len(self.shape) == 0:
+            # for 0-D tensors, skip cinn testing
+            self.enable_cinn = False
+
+        np.random.seed(1024)
+        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out = np.floor(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def init_shape(self):
+        self.shape = [10, 12]
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+
+class TestFloorPrim_ZeroDim(TestFloorPrim):
+    def init_shape(self):
+        self.shape = []
+
+
+class TestFloorPrimFp16(TestFloorPrim):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
 class TestCos(TestActivation):
     def setUp(self):
         self.op_type = "cos"
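As a quick dygraph sanity check of the new composite rule — a minimal sketch, assuming a Paddle build with this patch applied; the tensor values are arbitrary — the gradient flowing back through `paddle.floor` should come out as all zeros:

```python
import paddle

# floor() is piecewise constant, so its derivative is zero almost everywhere
# (and undefined at the integer points); the composite floor_grad above
# encodes this by returning a zeros tensor shaped like out_grad.
x = paddle.to_tensor([-1.7, 0.3, 2.5], stop_gradient=False)
y = paddle.floor(x)
y.sum().backward()
print(x.grad)  # expected: [0., 0., 0.]
```

Returning zeros here matches the behavior of the existing `floor_grad` kernel, which is why the `TestFloorPrim` case above compares the prim result against the eager python api (via `only_prim`) rather than against a numeric gradient, which would be nan/undefined for a step function.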