From ef6e8d09300b24bce7812f846640e8bcfda58b9c Mon Sep 17 00:00:00 2001
From: GGBond8488 <33050871+GGBond8488@users.noreply.github.com>
Date: Fri, 28 Apr 2023 16:17:30 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=900D=20output=E3=80=91add=5F0D=5Foutput?=
 =?UTF-8?q?=5Fsupport=20(#52857)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add 0d support for dist, trace, paddle.linalg.cond, test=allcase
* add_0d_output_support_for_det
* test=allcase
* support_0d_output_for_linalg.norm
* support linalg.norm 0d output, test=allcase
* fix 0D test
* fix zero dim test, test=allcase
* fix 0D test
* fix tests, test=allcase
* fix error, test=allcase
* fix errors, test=allcase
* add static backward, test=allcase
* add static backward test, test=allcase
* fix pr-ci-build error;test=document_fix (#53060)
* [Cherry-Pick] Unique support float16&bfloat16 (#53023)
  Add float16 and bfloat16 data type support to unique, and improve the related unit tests.
* slogdet_support_0D_output
* add new case
* fix tests, test=allcase
* fix p_norm related test, test=allcase
* fix some err, test=allcase
* test=allcase
* move out trace
* open some case, test=allcase
* fix norm all case, test=allcase
* fix some test error, test=allcase
* fix typo, test=allcase
* fix test err, test=allcase
* test=allcase
* test
* fix test error, test=allcase
* fix test error, test=allcase
* fallback norm, test=allcase

---------

Co-authored-by: tianshuo78520a <707759223@qq.com>
Co-authored-by: Zhang Zheng <32410583+ZzSean@users.noreply.github.com>
---
 paddle/phi/infermeta/binary.cc                     |   2 +-
 paddle/phi/infermeta/unary.cc                      |   1 -
 .../impl/determinant_grad_kernel_impl.h            |   4 +-
 .../kernels/impl/determinant_kernel_impl.h         |   2 +-
 .../tests/unittests/test_zero_dim_tensor.py        | 243 ++++++++++++++++++
 test/autograd/test_orig2prim.py                    |   7 +-
 6 files changed, 253 insertions(+), 6 deletions(-)

diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 228d50f3c4a..efb6458601b 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -990,7 +990,7 @@ void DistInferMeta(const MetaTensor& x,
                         "The Input(Y) has not been initialized properly. 
The " "shape of Input(Y) = [%s].", y_dims)); - out->set_dims({1}); + out->set_dims(phi::make_ddim({})); out->set_dtype(x.dtype()); } diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 78830c68226..845fe955639 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -2767,7 +2767,6 @@ void PNormInferMeta(const MetaTensor& x, if (reduce_dims.size() == 0) { reduce_dims.emplace_back(1); } - x_dim[axis] = 1; } diff --git a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h index 4d58698c64d..b17512ad1da 100644 --- a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h @@ -91,10 +91,10 @@ void DeterminantGradKernel(const Context& dev_ctx, " input tensor's, but here differ %d", input_dims_size - out_grad.dims().size())); } else if (input_dims_size == 2) { - // input dims size 2 and grad dims size 1 is possible + // input dims size 2 and grad dims size 0 is possible PADDLE_ENFORCE_EQ( out_grad.dims().size(), - 1, + 0, phi::errors::InvalidArgument( "The grad tensor of det dims size should be 2 less than" " input tensor's, but here differ %d", diff --git a/paddle/phi/kernels/impl/determinant_kernel_impl.h b/paddle/phi/kernels/impl/determinant_kernel_impl.h index 3c437ad659c..01c54d780b4 100644 --- a/paddle/phi/kernels/impl/determinant_kernel_impl.h +++ b/paddle/phi/kernels/impl/determinant_kernel_impl.h @@ -126,7 +126,7 @@ void DeterminantKernel(const Context& dev_ctx, out->Resize(output_dims); } else { // when input is a two-dimension matrix, The det value is a number. - out->Resize({1}); + out->Resize(phi::make_ddim({})); } VLOG(10) << "output dim:" << out->dims(); } diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py index b08fe5c9b37..bc8a7b82c5b 100644 --- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py @@ -2616,6 +2616,107 @@ class TestSundryAPI(unittest.TestCase): self.assertEqual(b.grad.shape, [4, 5]) self.assertEqual(c.grad.shape, [5]) + def test_cov(self): + xt = paddle.randn((3, 4)) + xt.stop_gradient = False + xt_1 = paddle.randn((12,)) + xt_1.stop_gradient = False + + xt_out = paddle.linalg.cov(xt) + xt_out.retain_grads() + xt_out.backward() + self.assertEqual(xt_out.shape, [3, 3]) + self.assertEqual(xt.grad.shape, [3, 4]) + + xt_1_out = paddle.linalg.cov(xt_1) + xt_1.retain_grads() + xt_1_out.backward() + self.assertEqual(xt_1_out.shape, []) + self.assertEqual(xt_1.grad.shape, [12]) + + def test_det(self): + xt = paddle.randn([3, 3, 3]) + xt.stop_gradient = False + xt_1 = paddle.randn([3, 3]) + xt_1.stop_gradient = False + + xt_out = paddle.linalg.det(xt) + xt.retain_grads() + xt_out.backward() + self.assertEqual(xt_out.shape, [3]) + self.assertEqual(xt.grad.shape, [3, 3, 3]) + + xt_1_out = paddle.linalg.det(xt_1) + xt_1.retain_grads() + xt_1_out.backward() + self.assertEqual(xt_1_out.shape, []) + self.assertEqual(xt_1.grad.shape, [3, 3]) + + def test_dist(self): + x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32") + y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32") + x.stop_gradient = False + y.stop_gradient = False + out = paddle.dist(x, y, 0) + out.backward() + + self.assertEqual(out.shape, []) + np.testing.assert_allclose(out, np.array(1)) + self.assertEqual(x.grad.shape, [2, 2]) + self.assertEqual(y.grad.shape, [2, 2]) + + def 
test_linalg_cond(self):
+        def assert_shape(out):
+            self.assertEqual(out.shape, [])
+
+        # x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x1.stop_gradient = False
+        # p = 2 : use paddle.sum
+        # out = paddle.linalg.cond(x1)
+        # assert_shape(out)
+
+        # p = fro : use paddle.sum
+        # x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x2.stop_gradient = False
+        # out_fro = paddle.linalg.cond(x2, p='fro')
+        # assert_shape(out_fro)
+
+        # p = nuc : use paddle.sum
+        # x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x3.stop_gradient = False
+        # out_nuc = paddle.linalg.cond(x3, p='nuc')
+        # assert_shape(out_nuc)
+
+        # p in (-1, 1) : use paddle.sum
+        # x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x4.stop_gradient = False
+        # out_1 = paddle.linalg.cond(x4, p=1)
+        # assert_shape(out_1)
+        # x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x5.stop_gradient = False
+        # out_minus_1 = paddle.linalg.cond(x5, p=-1)
+        # assert_shape(out_minus_1)
+
+        # p in (-2, 2) depends on paddle.sum
+        # x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x6.stop_gradient = False
+        # out_2 = paddle.linalg.cond(x6, p=2)
+        # assert_shape(out_2)
+
+        # p in (-inf, inf):use paddle.sum
+        # x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x8.stop_gradient = False
+        # out_inf = paddle.linalg.cond(x8, p=float("inf"))
+        # assert_shape(out_inf)
+
+        # depends on paddle.sum
+        # a = paddle.randn([2, 4, 4])
+        # a.stop_gradient = False
+        # a_cond_fro = paddle.linalg.cond(a, p='fro')
+        # a_cond_fro.backward()
+        # self.assertEqual(len(a_cond_fro.shape), 1)
+        # self.assertEqual(a.grad.shape, [2, 4, 4])
+
     def test_trace(self):
         x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
         x.stop_gradient = False
@@ -4723,6 +4824,148 @@ class TestSundryAPIStatic(unittest.TestCase):
         self.assertEqual(res[2].shape, (4, 5))
         self.assertEqual(res[3].shape, (5,))
 
+    @prog_scope()
+    def test_cov(self):
+        xt_1 = paddle.randn((12,))
+        xt_1.stop_gradient = False
+
+        out = paddle.linalg.cov(xt_1)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+
+        res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (12,))
+
+    @prog_scope()
+    def test_det(self):
+        xt_1 = paddle.randn((3, 3))
+        xt_1.stop_gradient = False
+
+        out = paddle.linalg.det(xt_1)
+        paddle.static.append_backward(out.sum())
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (3, 3))
+
+    @prog_scope()
+    def test_dist(self):
+        x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
+        y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32")
+        x.stop_gradient = False
+        y.stop_gradient = False
+        out = paddle.dist(x, y)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out, x.grad_name, y.grad_name])
+
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (2, 2))
+        self.assertEqual(res[2].shape, (2, 2))
+        np.testing.assert_array_equal(res[0], np.array(2).astype(np.float32))
+
+    @prog_scope()
+    def test_linalg_cond(self):
+        pass
+        # use paddle.sum
+        # x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x.stop_gradient = False
+        # out = paddle.linalg.cond(x)
+        # paddle.static.append_backward(out)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, 
fetch_list=[out, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # p = fro : use paddle.sum + # x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x2.stop_gradient = False + # out_fro = paddle.linalg.cond(x2, p='fro') + # paddle.static.append_backward(out_fro) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_fro, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # p = nuc : use paddle.sum + # x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x3.stop_gradient = False + # out_nuc = paddle.linalg.cond(x3, p='nuc') + # paddle.static.append_backward(out_nuc) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_nuc, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # p in (-1, 1) : use paddle.sum + # x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x4.stop_gradient = False + # out_1 = paddle.linalg.cond(x4, p=1) + # paddle.static.append_backward(out_1) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_1, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x5.stop_gradient = False + # out_minus_1 = paddle.linalg.cond(x5, p=-1) + # paddle.static.append_backward(out_minus_1) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_minus_1, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # p in (-2, 2) depends on paddle.sum + # x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x6.stop_gradient = False + # out_2 = paddle.linalg.cond(x6, p=2) + # paddle.static.append_backward(out_2) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_2, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # p in (-inf, inf):use paddle.sum + # x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]]) + # x8.stop_gradient = False + # out_inf = paddle.linalg.cond(x8, p=float("inf")) + # paddle.static.append_backward(out_inf) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[out_inf, x.grad_name]) + + # self.assertTrue(res[0].shape, ()) + # self.assertTrue(res[1].shape, (3, 3)) + + # depends on paddle.sum + # a = paddle.randn([2, 4, 4]) + # a.stop_gradient = False + # a_cond_fro = paddle.linalg.cond(a, p='fro') + # paddle.static.append_backward(a_cond_fro) + + # prog = paddle.static.default_main_program() + # res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name]) + + # self.assertEqual(res[0].shape, (2,)) + # self.assertEqual(res[1].shape, (2, 4, 4)) + @prog_scope() def test_trace(self): x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32") diff --git a/test/autograd/test_orig2prim.py b/test/autograd/test_orig2prim.py index 2f8f2a9648b..8a42255d711 100644 --- a/test/autograd/test_orig2prim.py +++ b/test/autograd/test_orig2prim.py @@ -463,7 +463,12 @@ class TestPNormOrig2Prim1(TestElementWiseAddOrig2Prim): } self.orig2prim_args = (X,) - self.all_ops = ['p_norm', 'reshape_p', 'abs_p', 'reduce_sum_p'] + self.all_ops = [ + 'p_norm', + 'reshape_p', + 'abs_p', + 'reduce_sum_p', + ] self.out_map = {0: self.output['Out']} -- GitLab
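Note added for illustration (not part of the upstream patch): the user-visible effect of this change is that scalar-producing ops such as paddle.dist, paddle.linalg.det of a single matrix, and paddle.linalg.cov of a 1-D input now return 0-D tensors (shape []) instead of tensors of shape [1]. A minimal dygraph sketch, assuming a Paddle build that already includes this patch:

import paddle

x = paddle.to_tensor([[3.0, 3.0], [3.0, 3.0]])
y = paddle.to_tensor([[3.0, 3.0], [3.0, 1.0]])

# paddle.dist reduces to a single value; with this patch its shape is [] (0-D).
out = paddle.dist(x, y, p=2)
print(out.shape)  # [] after this patch, previously [1]

# The determinant of one 2-D matrix is likewise a 0-D tensor.
det = paddle.linalg.det(paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]]))
print(det.shape)  # []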