diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 228d50f3c4a9e627f39329657857e3e259da213d..efb6458601ba93051a7b9cf089a0ce112aba3581 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -990,7 +990,7 @@ void DistInferMeta(const MetaTensor& x,
                         "The Input(Y) has not been initialized properly. The "
                         "shape of Input(Y) = [%s].",
                         y_dims));
-  out->set_dims({1});
+  out->set_dims(phi::make_ddim({}));
   out->set_dtype(x.dtype());
 }
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 78830c682268c8d645f1e060476b1bd5773b872d..845fe95563900f438ce6182b0798ccc92dc01bbd 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -2767,7 +2767,6 @@ void PNormInferMeta(const MetaTensor& x,
     if (reduce_dims.size() == 0) {
       reduce_dims.emplace_back(1);
     }
-    x_dim[axis] = 1;
   }
diff --git a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
index 4d58698c64d228d34d35b867f58134cee4ea7db9..b17512ad1da87918680d8dccb9f1f000bcf55762 100644
--- a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
@@ -91,10 +91,10 @@ void DeterminantGradKernel(const Context& dev_ctx,
             " input tensor's, but here differ %d",
             input_dims_size - out_grad.dims().size()));
   } else if (input_dims_size == 2) {
-    // input dims size 2 and grad dims size 1 is possible
+    // input dims size 2 and grad dims size 0 is possible
     PADDLE_ENFORCE_EQ(
         out_grad.dims().size(),
-        1,
+        0,
         phi::errors::InvalidArgument(
             "The grad tensor of det dims size should be 2 less than"
             " input tensor's, but here differ %d",
diff --git a/paddle/phi/kernels/impl/determinant_kernel_impl.h b/paddle/phi/kernels/impl/determinant_kernel_impl.h
index 3c437ad659c43ac3c7556b149e0f13cbcffe65d5..01c54d780b4b0ea6bafa91fdd6a739e3a1192041 100644
--- a/paddle/phi/kernels/impl/determinant_kernel_impl.h
+++ b/paddle/phi/kernels/impl/determinant_kernel_impl.h
@@ -126,7 +126,7 @@ void DeterminantKernel(const Context& dev_ctx,
     out->Resize(output_dims);
   } else {
     // when input is a two-dimension matrix, The det value is a number.
-    out->Resize({1});
+    out->Resize(phi::make_ddim({}));
   }
   VLOG(10) << "output dim:" << out->dims();
 }
diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
index b08fe5c9b37d4d1c8dc3c3fe7917c3a4bffedecb..bc8a7b82c5bc8f996f09980a5259c7356fa77844 100644
--- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -2616,6 +2616,107 @@ class TestSundryAPI(unittest.TestCase):
         self.assertEqual(b.grad.shape, [4, 5])
         self.assertEqual(c.grad.shape, [5])
 
+    def test_cov(self):
+        xt = paddle.randn((3, 4))
+        xt.stop_gradient = False
+        xt_1 = paddle.randn((12,))
+        xt_1.stop_gradient = False
+
+        xt_out = paddle.linalg.cov(xt)
+        xt_out.retain_grads()
+        xt_out.backward()
+        self.assertEqual(xt_out.shape, [3, 3])
+        self.assertEqual(xt.grad.shape, [3, 4])
+
+        xt_1_out = paddle.linalg.cov(xt_1)
+        xt_1.retain_grads()
+        xt_1_out.backward()
+        self.assertEqual(xt_1_out.shape, [])
+        self.assertEqual(xt_1.grad.shape, [12])
+
+    def test_det(self):
+        xt = paddle.randn([3, 3, 3])
+        xt.stop_gradient = False
+        xt_1 = paddle.randn([3, 3])
+        xt_1.stop_gradient = False
+
+        xt_out = paddle.linalg.det(xt)
+        xt.retain_grads()
+        xt_out.backward()
+        self.assertEqual(xt_out.shape, [3])
+        self.assertEqual(xt.grad.shape, [3, 3, 3])
+
+        xt_1_out = paddle.linalg.det(xt_1)
+        xt_1.retain_grads()
+        xt_1_out.backward()
+        self.assertEqual(xt_1_out.shape, [])
+        self.assertEqual(xt_1.grad.shape, [3, 3])
+
+    def test_dist(self):
+        x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
+        y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32")
+        x.stop_gradient = False
+        y.stop_gradient = False
+        out = paddle.dist(x, y, 0)
+        out.backward()
+
+        self.assertEqual(out.shape, [])
+        np.testing.assert_allclose(out, np.array(1))
+        self.assertEqual(x.grad.shape, [2, 2])
+        self.assertEqual(y.grad.shape, [2, 2])
+
+    def test_linalg_cond(self):
+        def assert_shape(out):
+            self.assertEqual(out.shape, [])
+
+        # x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x1.stop_gradient = False
+        # p = 2 : use paddle.sum
+        # out = paddle.linalg.cond(x1)
+        # assert_shape(out)
+
+        # p = fro : use paddle.sum
+        # x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x2.stop_gradient = False
+        # out_fro = paddle.linalg.cond(x2, p='fro')
+        # assert_shape(out_fro)
+
+        # p = nuc : use paddle.sum
+        # x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x3.stop_gradient = False
+        # out_nuc = paddle.linalg.cond(x3, p='nuc')
+        # assert_shape(out_nuc)
+
+        # p in (-1, 1) : use paddle.sum
+        # x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x4.stop_gradient = False
+        # out_1 = paddle.linalg.cond(x4, p=1)
+        # assert_shape(out_1)
+        # x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x5.stop_gradient = False
+        # out_minus_1 = paddle.linalg.cond(x5, p=-1)
+        # assert_shape(out_minus_1)
+
+        # p in (-2, 2) depends on paddle.sum
+        # x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x6.stop_gradient = False
+        # out_2 = paddle.linalg.cond(x6, p=2)
+        # assert_shape(out_2)
+
+        # p in (-inf, inf):use paddle.sum
+        # x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x8.stop_gradient = False
+        # out_inf = paddle.linalg.cond(x8, p=float("inf"))
+        # assert_shape(out_inf)
+
+        # depends on paddle.sum
+        # a = paddle.randn([2, 4, 4])
+        # a.stop_gradient = False
+        # a_cond_fro = paddle.linalg.cond(a, p='fro')
+        # a_cond_fro.backward()
+        # self.assertEqual(len(a_cond_fro.shape), 1)
+        # self.assertEqual(a.grad.shape, [2, 4, 4])
+
     def test_trace(self):
         x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
         x.stop_gradient = False
@@ -4723,6 +4824,148 @@ class TestSundryAPIStatic(unittest.TestCase):
         self.assertEqual(res[2].shape, (4, 5))
         self.assertEqual(res[3].shape, (5,))
 
+    @prog_scope()
+    def test_cov(self):
+        xt_1 = paddle.randn((12,))
+        xt_1.stop_gradient = False
+
+        out = paddle.linalg.cov(xt_1)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+
+        res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (12,))
+
+    @prog_scope()
+    def test_det(self):
+        xt_1 = paddle.randn((3, 3))
+        xt_1.stop_gradient = False
+
+        out = paddle.linalg.det(xt_1)
+        paddle.static.append_backward(out.sum())
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (3, 3))
+
+    @prog_scope()
+    def test_dist(self):
+        x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
+        y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32")
+        x.stop_gradient = False
+        y.stop_gradient = False
+        out = paddle.dist(x, y)
+        paddle.static.append_backward(out)
+
+        prog = paddle.static.default_main_program()
+        res = self.exe.run(prog, fetch_list=[out, x.grad_name, y.grad_name])
+
+        self.assertEqual(res[0].shape, ())
+        self.assertEqual(res[1].shape, (2, 2))
+        self.assertEqual(res[2].shape, (2, 2))
+        np.testing.assert_array_equal(res[0], np.array(2).astype(np.float32))
+
+    @prog_scope()
+    def test_linalg_cond(self):
+        pass
+        # use paddle.sum
+        # x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x.stop_gradient = False
+        # out = paddle.linalg.cond(x)
+        # paddle.static.append_backward(out)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # p = fro : use paddle.sum
+        # x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x2.stop_gradient = False
+        # out_fro = paddle.linalg.cond(x2, p='fro')
+        # paddle.static.append_backward(out_fro)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_fro, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # p = nuc : use paddle.sum
+        # x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x3.stop_gradient = False
+        # out_nuc = paddle.linalg.cond(x3, p='nuc')
+        # paddle.static.append_backward(out_nuc)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_nuc, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # p in (-1, 1) : use paddle.sum
+        # x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x4.stop_gradient = False
+        # out_1 = paddle.linalg.cond(x4, p=1)
+        # paddle.static.append_backward(out_1)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_1, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x5.stop_gradient = False
+        # out_minus_1 = paddle.linalg.cond(x5, p=-1)
+        # paddle.static.append_backward(out_minus_1)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_minus_1, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # p in (-2, 2) depends on paddle.sum
+        # x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x6.stop_gradient = False
+        # out_2 = paddle.linalg.cond(x6, p=2)
+        # paddle.static.append_backward(out_2)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_2, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # p in (-inf, inf):use paddle.sum
+        # x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
+        # x8.stop_gradient = False
+        # out_inf = paddle.linalg.cond(x8, p=float("inf"))
+        # paddle.static.append_backward(out_inf)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[out_inf, x.grad_name])
+
+        # self.assertTrue(res[0].shape, ())
+        # self.assertTrue(res[1].shape, (3, 3))
+
+        # depends on paddle.sum
+        # a = paddle.randn([2, 4, 4])
+        # a.stop_gradient = False
+        # a_cond_fro = paddle.linalg.cond(a, p='fro')
+        # paddle.static.append_backward(a_cond_fro)
+
+        # prog = paddle.static.default_main_program()
+        # res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name])
+
+        # self.assertEqual(res[0].shape, (2,))
+        # self.assertEqual(res[1].shape, (2, 4, 4))
+
     @prog_scope()
     def test_trace(self):
         x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
diff --git a/test/autograd/test_orig2prim.py b/test/autograd/test_orig2prim.py
index 2f8f2a9648bfd8f4554391ab0280067c94fce6be..8a42255d711ee7e70e4d439fb41d83995e68eb6e 100644
--- a/test/autograd/test_orig2prim.py
+++ b/test/autograd/test_orig2prim.py
@@ -463,7 +463,12 @@ class TestPNormOrig2Prim1(TestElementWiseAddOrig2Prim):
         }
 
         self.orig2prim_args = (X,)
-        self.all_ops = ['p_norm', 'reshape_p', 'abs_p', 'reduce_sum_p']
+        self.all_ops = [
+            'p_norm',
+            'reshape_p',
+            'abs_p',
+            'reduce_sum_p',
+        ]
         self.out_map = {0: self.output['Out']}
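
Illustrative usage (not part of the diff): a minimal eager-mode sketch of the 0-D output behavior exercised by the tests above, assuming a Paddle build that includes these changes; builds without them report shape [1] for the same calls.

import paddle

x = paddle.randn([3, 3])
x.stop_gradient = False

# det of a single 2-D matrix is now a 0-D (scalar) tensor
det = paddle.linalg.det(x)
print(det.shape)  # [] instead of [1]
det.backward()
print(x.grad.shape)  # [3, 3]

# dist likewise reduces to a 0-D tensor
y = paddle.zeros([3, 3])
print(paddle.dist(x, y).shape)  # []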