diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index faf2d7660ea3704c853fe3cc072a4ea154b4a90a..35cf2c66fd2f3116342290e18c2fe4ac0a9a7dac 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -478,6 +478,17 @@
     func : silu_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : sin_double_grad
+  forward : sin_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
+  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
+  output : Tensor(x_grad), Tensor(grad_out_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, x]
+  kernel :
+    func : sin_double_grad
+  inplace : (grad_x_grad -> grad_out_grad)
+
 - backward_op : sin_grad
   forward : sin (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -487,6 +498,7 @@
     param : [x]
   kernel :
     func : sin_grad
+  backward : sin_double_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : sinh_grad
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index e8a587a5da1a266ae78471627b334c0f8e6e399f..4dad61d58e3467792407024a068be04264d0e7bf 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -798,7 +798,7 @@
   attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
 - op : sin
-  backward : sin_grad
+  backward : sin_grad, sin_double_grad
   inputs :
     x : X
   outputs :
diff --git a/paddle/phi/kernels/activation_grad_kernel.h b/paddle/phi/kernels/activation_grad_kernel.h
index d1f6c1c4e573e6ff32854bf42cf108213cc66324..0f99236eddeec4f92aa0cc70630aabb39a919a7b 100644
--- a/paddle/phi/kernels/activation_grad_kernel.h
+++ b/paddle/phi/kernels/activation_grad_kernel.h
@@ -80,6 +80,14 @@ void ReluDoubleGradKernel(const Context& dev_ctx,
                           const DenseTensor& ddx,
                           DenseTensor* ddout);
 
+template <typename T, typename Context>
+void SinDoubleGradKernel(const Context& dev_ctx,
+                         const DenseTensor& x,
+                         const DenseTensor& dout,
+                         const DenseTensor& ddx,
+                         DenseTensor* dx,
+                         DenseTensor* ddout);
+
 template <typename T, typename Context>
 void TanhDoubleGradKernel(const Context& dev_ctx,
                           const DenseTensor& out,
diff --git a/paddle/phi/kernels/cpu/activation_grad_kernel.cc b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
index c3df87de24349b41cb976560a4a58043a0456c82..f8602fff21f67e8b074854ef7bf38332a13ab378 100644
--- a/paddle/phi/kernels/cpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
@@ -336,6 +336,15 @@ PD_REGISTER_KERNEL(square_double_grad,
                    phi::dtype::float16,
                    int,
                    int64_t) {}
+PD_REGISTER_KERNEL(sin_double_grad,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::SinDoubleGradKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   int,
+                   int64_t) {}
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_double_grad, SigmoidDoubleGradKernel)
diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h
index d0ab1e7f2b3723b5eada8e8f1274addf790fb3ae..3e5a1e3bf6428fb0fb23556cc312a53f1b4c0351 100644
--- a/paddle/phi/kernels/funcs/activation_functor.h
+++ b/paddle/phi/kernels/funcs/activation_functor.h
@@ -106,6 +106,38 @@ struct SinFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+// sin''(x) = -sin(x)
+template <typename T>
+struct SinDoubleGradFunctor : public BaseActivationFunctor<T> {
+  template <typename Device>
+  void operator()(const Device& dev,
+                  const DenseTensor* X,
+                  const DenseTensor* dOut,
+                  const DenseTensor* ddX,
+                  DenseTensor* dX,
+                  DenseTensor* ddOut) const {
+    auto* d = dev.eigen_device();
+    auto ddx = EigenVector<T>::Flatten(
+        GET_DATA_SAFELY(ddX, "Input", "DDX", "SinDoubleGrad"));
+    auto x = EigenVector<T>::Flatten(
+        GET_DATA_SAFELY(X, "Input", "X", "SinDoubleGrad"));
+    // sin double grad: ddout = cos(x) * ddx, dx = -sin(x) * dout * ddx
+
+    // calculate dx first, so ddout can safely be computed inplace of ddx
+    auto dx = EigenVector<T>::Flatten(
+        GET_DATA_SAFELY(dX, "Output", "DX", "SinDoubleGrad"));
+    auto dout = EigenVector<T>::Flatten(
+        GET_DATA_SAFELY(dOut, "Output", "DOut", "SinDoubleGrad"));
+    dx.device(*d) = -ddx * x.unaryExpr(Sine<T>()) * dout;
+
+    // calculate ddout
+    auto ddout = EigenVector<T>::Flatten(
+        GET_DATA_SAFELY(ddOut, "Output", "DDOut", "SinDoubleGrad"));
+    ddout.device(*d) = ddx * x.unaryExpr(Cosine<T>());
+  }
+  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
+};
+
 // reciprocal(x) = 1 / x
 template <typename T>
 struct ReciprocalFunctor : public BaseActivationFunctor<T> {
diff --git a/paddle/phi/kernels/gpu/activation_grad_kernel.cu b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
index f5cf46bc10361b3a41cd448505b15a1baf9540c8..f6764465bfa6aa86e0f1612617c01026c7f7a29f 100644
--- a/paddle/phi/kernels/gpu/activation_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
@@ -417,6 +417,16 @@ PD_REGISTER_KERNEL(square_double_grad,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
 
+PD_REGISTER_KERNEL(sin_double_grad,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::SinDoubleGradKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::float16) {}
+
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_double_grad, SigmoidDoubleGradKernel)
diff --git a/paddle/phi/kernels/impl/activation_grad_impl.h b/paddle/phi/kernels/impl/activation_grad_impl.h
index 58471eb3c8fc646b1f148d710e22e7f4d662a796..14d29c51286b9ce08e5e151766cd96145fb746b7 100644
--- a/paddle/phi/kernels/impl/activation_grad_impl.h
+++ b/paddle/phi/kernels/impl/activation_grad_impl.h
@@ -417,4 +417,22 @@ void SquareDoubleGradKernel(const Context& dev_ctx,
   functor(dev_ctx, &x, &dout, &ddx, dx, ddout);
 }
 
+template <typename T, typename Context>
+void SinDoubleGradKernel(const Context& dev_ctx,
+                         const DenseTensor& x,
+                         const DenseTensor& dout,
+                         const DenseTensor& ddx,
+                         DenseTensor* dx,
+                         DenseTensor* ddout) {
+  if (dx) {
+    dx->Resize(x.dims());
+    dev_ctx.template Alloc<T>(dx);
+  }
+  if (ddout) {
+    dev_ctx.template Alloc<T>(ddout);
+  }
+  phi::funcs::SinDoubleGradFunctor<T> functor;
+  functor(dev_ctx, &x, &dout, &ddx, dx, ddout);
+}
+
 }  // namespace phi
diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
index bc5a14070e5cef130b1bb5d7cb492406fb5bb9b3..46549be7f649a0998515b27132953ce139743b57 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py
@@ -471,5 +471,37 @@ class TestLogDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestSinDoubleGradCheck(unittest.TestCase):
+    def sin_wrapper(self, x):
+        return paddle.sin(x[0])
+
+    @prog_scope()
+    def func(self, place):
+        shape = [2, 3, 7, 9]
+        eps = 0.0005
+        dtype = np.float64
+        x = layers.data('x', shape, False, dtype=dtype)
+        x.persistable = True
+        y = paddle.sin(x)
+        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
+        x_arr[np.abs(x_arr) < 0.005] = 0.002
+        gradient_checker.double_grad_check(
+            [x], y, x_init=x_arr, place=place, eps=eps
+        )
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.sin_wrapper, [x], y, x_init=x_arr, place=place
+        )
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     unittest.main()
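
Note (not part of the patch): a minimal eager-mode sketch of what this change enables, assuming the public paddle.grad / paddle.uniform APIs. The second paddle.grad call differentiates sin_grad and should now be dispatched to sin_double_grad; the asserts check the identities the functor implements, ddout = cos(x) * ddx and dx = -sin(x) * dout * ddx, with dout = ddx = 1.

import numpy as np
import paddle

# Dygraph double grad of sin: d/dx sin(x) = cos(x), d2/dx2 sin(x) = -sin(x).
x = paddle.uniform([2, 3, 7, 9], min=-1.0, max=1.0)
x.stop_gradient = False
y = paddle.sin(x)

# First-order grad, keeping the graph so it can be differentiated again.
(dy_dx,) = paddle.grad(y, x, create_graph=True)   # cos(x)
# Second-order grad; this step exercises the new sin_double_grad kernel.
(d2y_dx2,) = paddle.grad(dy_dx, x)                # -sin(x)

np.testing.assert_allclose(dy_dx.numpy(), np.cos(x.numpy()), rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(d2y_dx2.numpy(), -np.sin(x.numpy()), rtol=1e-5, atol=1e-6)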