未验证 提交 ddd72039 编写于 作者: L Leo Chen 提交者: GitHub

fix dist_grad kernel (#53239)

上级 562d2daf
...@@ -52,6 +52,10 @@ void DistGradKernel(const Context& dev_ctx, ...@@ -52,6 +52,10 @@ void DistGradKernel(const Context& dev_ctx,
float p, float p,
DenseTensor* x_grad, DenseTensor* x_grad,
DenseTensor* y_grad) { DenseTensor* y_grad) {
if ((!x_grad) && (!y_grad)) {
return;
}
auto t = Subtract<T, Context>(dev_ctx, x, y); auto t = Subtract<T, Context>(dev_ctx, x, y);
DenseTensor x_grad_tmp; DenseTensor x_grad_tmp;
x_grad_tmp.Resize(t.dims()); x_grad_tmp.Resize(t.dims());
...@@ -59,26 +63,32 @@ void DistGradKernel(const Context& dev_ctx, ...@@ -59,26 +63,32 @@ void DistGradKernel(const Context& dev_ctx,
y_grad_tmp.Resize(t.dims()); y_grad_tmp.Resize(t.dims());
PNormGradKernel<T, Context>( PNormGradKernel<T, Context>(
dev_ctx, t, out, out_grad, p, -1, 1e-12, false, true, &x_grad_tmp); dev_ctx, t, out, out_grad, p, -1, 1e-12, false, true, &x_grad_tmp);
ScaleKernel<T, Context>(dev_ctx, x_grad_tmp, -1.0, 0.0, false, &y_grad_tmp);
// do reduce, the implemetation of cpu SumKernel has bug, it changes if (x_grad) {
// the dims of output iternally, so we Resize x/y_grad twice. // do reduce, the implemetation of cpu SumKernel has bug, it changes
auto res_x = GetReduceDims(x_grad_tmp.dims(), x.dims()); // the dims of output iternally, so we Resize x/y_grad twice.
if (!std::get<0>(res_x).empty()) { auto res_x = GetReduceDims(x_grad_tmp.dims(), x.dims());
x_grad->Resize(phi::make_ddim(std::get<1>(res_x))); if (!std::get<0>(res_x).empty()) {
SumKernel<T, Context>( x_grad->Resize(phi::make_ddim(std::get<1>(res_x)));
dev_ctx, x_grad_tmp, std::get<0>(res_x), x.dtype(), false, x_grad); SumKernel<T, Context>(
x_grad->Resize(x.dims()); dev_ctx, x_grad_tmp, std::get<0>(res_x), x.dtype(), false, x_grad);
} else { x_grad->Resize(x.dims());
x_grad->ShareBufferWith(x_grad_tmp); } else {
x_grad->ShareBufferWith(x_grad_tmp);
}
} }
auto res_y = GetReduceDims(y_grad_tmp.dims(), y.dims());
if (!std::get<0>(res_y).empty()) { if (y_grad) {
y_grad->Resize(phi::make_ddim(std::get<1>(res_y))); ScaleKernel<T, Context>(dev_ctx, x_grad_tmp, -1.0, 0.0, false, &y_grad_tmp);
SumKernel<T, Context>( auto res_y = GetReduceDims(y_grad_tmp.dims(), y.dims());
dev_ctx, y_grad_tmp, std::get<0>(res_y), y.dtype(), false, y_grad); if (!std::get<0>(res_y).empty()) {
y_grad->Resize(y.dims()); y_grad->Resize(phi::make_ddim(std::get<1>(res_y)));
} else { SumKernel<T, Context>(
y_grad->ShareBufferWith(y_grad_tmp); dev_ctx, y_grad_tmp, std::get<0>(res_y), y.dtype(), false, y_grad);
y_grad->Resize(y.dims());
} else {
y_grad->ShareBufferWith(y_grad_tmp);
}
} }
} }
......
...@@ -192,6 +192,15 @@ class TestDistAPI(unittest.TestCase): ...@@ -192,6 +192,15 @@ class TestDistAPI(unittest.TestCase):
) )
np.testing.assert_allclose(dist(x_i, y_i, p), out[0], rtol=1e-05) np.testing.assert_allclose(dist(x_i, y_i, p), out[0], rtol=1e-05)
def test_grad_x(self):
    """Smoke test: backward through paddle.dist when only one input needs a gradient.

    Runs in dynamic (imperative) mode; the two operands have shapes that
    broadcast against each other, and only the first operand has
    ``stop_gradient = False``, so only x_grad is requested from the kernel.
    Passing criterion is simply that ``backward()`` completes without error.
    """
    paddle.disable_static()
    # Broadcastable shapes; rhs keeps stop_gradient's default (True).
    lhs = paddle.rand([2, 2, 3, 2])
    rhs = paddle.rand([1, 1, 3, 1])
    lhs.stop_gradient = False
    distance = paddle.dist(lhs, rhs, 2)
    distance.backward()
    # Restore static mode so later tests in this file are unaffected.
    paddle.enable_static()
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static() paddle.enable_static()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册