From 268156f84cafbe1720da3427827548c66c213bfc Mon Sep 17 00:00:00 2001
From: Zhang Zheng <32410583+ZzSean@users.noreply.github.com>
Date: Mon, 15 May 2023 17:27:49 +0800
Subject: [PATCH] [Cherry-Pick] Fix the calculation of y_grad in divide_backward (#53672)

---
 paddle/phi/kernels/funcs/elementwise_functor.h           | 8 ++++----
 paddle/phi/kernels/gpu/elementwise_divide_grad_kernel.cu | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/paddle/phi/kernels/funcs/elementwise_functor.h b/paddle/phi/kernels/funcs/elementwise_functor.h
index 3543c0c6aa0..da9abb0113d 100644
--- a/paddle/phi/kernels/funcs/elementwise_functor.h
+++ b/paddle/phi/kernels/funcs/elementwise_functor.h
@@ -116,7 +116,7 @@ struct DivGradXYFunctor {
     // dy = - dout * out / y
     phi::Array<OutT, 2> outs;
     outs[0] = a / c;
-    outs[1] = -a * b / c;
+    outs[1] = -a * ((b / c) / c);
     return outs;
   }
 };
@@ -129,7 +129,7 @@ struct DivGradXYFunctor<ComplexType<InT>, ComplexType<OutT>> {
       const ComplexType<InT> c) {
     phi::Array<ComplexType<OutT>, 2> outs;
     ComplexType<InT> c_conj(c.real, -c.imag);
-    ComplexType<InT> out_div_c_conj((b / c).real, -(b / c).imag);
+    ComplexType<InT> out_div_c_conj(((b / c) / c).real, -((b / c) / c).imag);
     outs[0] = a / c_conj;
     outs[1] = -a * out_div_c_conj;
     return outs;
@@ -156,7 +156,7 @@ struct DivGradXFunctor<ComplexType<T>> {
 
 template <typename T>
 struct DivGradYFunctor {
   inline HOSTDEVICE T operator()(const T a, const T b, const T c) const {
-    return -a * b / c;
+    return -a * ((b / c) / c);
   }
 };
@@ -166,7 +166,7 @@ struct DivGradYFunctor<ComplexType<T>> {
   inline HOSTDEVICE ComplexType<T> operator()(const ComplexType<T> a,
                                               const ComplexType<T> b,
                                               const ComplexType<T> c) const {
-    ComplexType<T> out_div_c_conj((b / c).real, -(b / c).imag);
+    ComplexType<T> out_div_c_conj(((b / c) / c).real, -((b / c) / c).imag);
     return -a * out_div_c_conj;
   }
 };
diff --git a/paddle/phi/kernels/gpu/elementwise_divide_grad_kernel.cu b/paddle/phi/kernels/gpu/elementwise_divide_grad_kernel.cu
index 57bf6da4060..d63841f17e6 100644
--- a/paddle/phi/kernels/gpu/elementwise_divide_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/elementwise_divide_grad_kernel.cu
@@ -36,7 +36,7 @@ void DivideGradKernel(const Context& dev_ctx,
                       DenseTensor* dy) {
   const auto place = dev_ctx.GetPlace();
   if (dx != nullptr && dy != nullptr) {
-    std::vector<const DenseTensor*> ins = {&dout, &out, &y};
+    std::vector<const DenseTensor*> ins = {&dout, &x, &y};
     GetGradXAndYOut<T>(
         dev_ctx,
         place,
@@ -51,7 +51,7 @@ void DivideGradKernel(const Context& dev_ctx,
     GetGradXOrYOut<T>(
         dev_ctx, place, axis, ins, dout, dx, funcs::DivGradXFunctor<T>());
   } else if (dy != nullptr && dx == nullptr) {
-    std::vector<const DenseTensor*> ins = {&dout, &out, &y};
+    std::vector<const DenseTensor*> ins = {&dout, &x, &y};
     GetGradXOrYOut<T>(
         dev_ctx, place, axis, ins, dout, dy, funcs::DivGradYFunctor<T>());
   }
-- 
GitLab
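
Background on the change (a minimal standalone sketch, not part of the patch):
for out = x / y, the y-gradient is dy = -dout * x / y^2, which equals
-dout * out / y. The old functors read the saved forward output (parameter b
was bound to `out`); the patch re-binds b to `x` and evaluates
-a * ((b / c) / c), so the backward result presumably no longer depends on
`out` still holding x / y. The check below uses made-up scalar values to show
the two expressions agree only while `out` is intact:

  #include <cassert>
  #include <cmath>

  int main() {
    double x = 3.0, y = 2.0, dout = 1.5;
    double out = x / y;                         // forward result

    double dy_old = -dout * out / y;            // old path: reads `out`
    double dy_new = -dout * ((x / y) / y);      // new path: recomputes from x
    assert(std::abs(dy_old - dy_new) < 1e-12);  // identical while `out` holds x / y

    out = 42.0;                                 // simulate `out` being overwritten
    double dy_stale = -dout * out / y;          // old path now yields a wrong value
    assert(std::abs(dy_stale - dy_new) > 1.0);
    return 0;
  }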