Unverified commit 6029e02a, authored by GGBond8488, committed by GitHub

[Zero-Dim] add 0D test for linalg.norm/linalg.cond (#53592)

* add 0D test for linalg.norm and linalg.cond

* remove p_norm test

* Update test_zero_dim_tensor.py

* Update test_zero_dim_tensor, test=allcase

* add 0D op test for cond and pnorm, test=allcase

* fix conda error
Parent eb12e627
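The change uncomments and extends the 0-D (scalar-output) checks for `paddle.linalg.norm` and `paddle.linalg.cond`. As a quick orientation, here is a minimal dynamic-graph sketch of the behaviour the new tests assert (not part of the diff; it assumes a working PaddlePaddle install, and the inputs mirror the test data below):

```python
import paddle

# norm over a full 1-D tensor reduces everything, so the result is 0-D.
x = paddle.arange(24, dtype="float32") - 12
x.stop_gradient = False
out = paddle.linalg.norm(x)
print(out.shape)       # [] -- a 0-D tensor, not shape [1]

out.backward()
print(x.grad.shape)    # [24] -- the gradient keeps the input's shape

# cond of a square matrix is likewise a scalar, hence a 0-D tensor.
m = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
print(paddle.linalg.cond(m).shape)  # []
```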
@@ -2658,57 +2658,156 @@ class TestSundryAPI(unittest.TestCase):
self.assertEqual(x.grad.shape, [2, 2])
self.assertEqual(y.grad.shape, [2, 2])
def test_linalg_norm(self):
# 1D input, p = fro, axis = None, using reduceInferMeta
x_1 = paddle.arange(24, dtype="float32") - 12
x_1.stop_gradient = False
out_1 = paddle.linalg.norm(x_1)
out_1.retain_grads()
out_1.backward()
self.assertEqual(out_1.shape, [])
self.assertEqual(x_1.grad.shape, [24])
# 1D input, p = 1, axis = None,
# using p_norm, as_vector = True
x_2 = paddle.arange(24, dtype="float32") - 12
x_2.stop_gradient = False
out_2 = paddle.linalg.norm(x_2, p=1)
out_2.retain_grads()
out_2.backward()
self.assertEqual(out_2.shape, [])
self.assertEqual(x_2.grad.shape, [24])
# 1D input, p = 1, axis = 0,
# using p_norm, as_vector = False
x_2_p = paddle.arange(24, dtype="float32") - 12
x_2_p.stop_gradient = False
out_2_p = paddle.linalg.norm(x_2_p, p=1, axis=0)
out_2_p.retain_grads()
out_2_p.backward()
self.assertEqual(out_2_p.shape, [])
self.assertEqual(x_2_p.grad.shape, [24])
# 1D input, p = fro, axis = 0,
# using p_norm, as_vector = False
x_2_fro = paddle.arange(24, dtype="float32") - 12
x_2_fro.stop_gradient = False
out_2_fro = paddle.linalg.norm(x_2_fro, p="fro", axis=0)
out_2_fro.retain_grads()
out_2_fro.backward()
self.assertEqual(out_2_fro.shape, [])
self.assertEqual(x_2_fro.grad.shape, [24])
# 2D input, p = 1, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_3 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_3.stop_gradient = False
out_3 = paddle.linalg.norm(x_3, p=1, axis=[0, 1])
out_3.retain_grads()
out_3.backward()
self.assertEqual(out_3.shape, [])
self.assertEqual(x_3.grad.shape, [4, 6])
# 2D input, p = 1, axis = None
# using p_matrix_norm, depends on paddle.sum
x_4 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_4.stop_gradient = False
out_4 = paddle.linalg.norm(x_4)
out_4.retain_grads()
out_4.backward()
self.assertEqual(out_4.shape, [])
self.assertEqual(x_4.grad.shape, [4, 6])
# 2D input, p = 2, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_5 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_5.stop_gradient = False
out_5 = paddle.linalg.norm(x_5, p=2, axis=[0, 1])
out_5.retain_grads()
out_5.backward()
self.assertEqual(out_5.shape, [])
self.assertEqual(x_5.grad.shape, [4, 6])
# 2D input, p = -inf, axis = [0, 1]
x_6 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_6.stop_gradient = False
out_6 = paddle.linalg.norm(x_6, p=-float("inf"), axis=[0, 1])
out_6.retain_grads()
out_6.backward()
self.assertEqual(out_6.shape, [])
self.assertEqual(x_6.grad.shape, [4, 6])
def test_linalg_cond(self):
def assert_shape(out):
self.assertEqual(out.shape, [])
# x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x1.stop_gradient = False
x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x1.stop_gradient = False
# p = 2 : use paddle.sum
# out = paddle.linalg.cond(x1)
# assert_shape(out)
out = paddle.linalg.cond(x1)
out.backward()
assert_shape(out)
self.assertEqual(x1.grad.shape, [3, 3])
# p = fro : use paddle.sum
# x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x2.stop_gradient = False
# out_fro = paddle.linalg.cond(x2, p='fro')
# assert_shape(out_fro)
x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x2.stop_gradient = False
out_fro = paddle.linalg.cond(x2, p='fro')
out_fro.backward()
assert_shape(out_fro)
self.assertEqual(x2.grad.shape, [3, 3])
# p = nuc : use paddle.sum
# x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x3.stop_gradient = False
# out_nuc = paddle.linalg.cond(x3, p='nuc')
# assert_shape(out_nuc)
x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x3.stop_gradient = False
out_nuc = paddle.linalg.cond(x3, p='nuc')
out_nuc.backward()
assert_shape(out_nuc)
self.assertEqual(x3.grad.shape, [3, 3])
# p in (-1, 1) : use paddle.sum
# x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x4.stop_gradient = False
# out_1 = paddle.linalg.cond(x4, p=1)
# assert_shape(out_1)
# x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x5.stop_gradient = False
# out_minus_1 = paddle.linalg.cond(x5, p=-1)
# assert_shape(out_minus_1)
x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x4.stop_gradient = False
out_1 = paddle.linalg.cond(x4, p=1)
out_1.backward()
assert_shape(out_1)
self.assertEqual(x4.grad.shape, [3, 3])
x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x5.stop_gradient = False
out_minus_1 = paddle.linalg.cond(x5, p=-1)
out_minus_1.backward()
assert_shape(out_minus_1)
self.assertEqual(x5.grad.shape, [3, 3])
# p in (-2, 2) depends on paddle.sum
# x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x6.stop_gradient = False
# out_2 = paddle.linalg.cond(x6, p=2)
# assert_shape(out_2)
x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x6.stop_gradient = False
out_2 = paddle.linalg.cond(x6, p=2)
out_2.backward()
assert_shape(out_2)
self.assertEqual(x6.grad.shape, [3, 3])
# p in (-inf, inf): use paddle.sum
# x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x8.stop_gradient = False
# out_inf = paddle.linalg.cond(x8, p=float("inf"))
# assert_shape(out_inf)
# depends on paddle.sum
# a = paddle.randn([2, 4, 4])
# a.stop_gradient = False
# a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.backward()
# self.assertEqual(len(a_cond_fro.shape), 1)
# self.assertEqual(a.grad.shape, [2, 4, 4])
x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x8.stop_gradient = False
out_inf = paddle.linalg.cond(x8, p=float("inf"))
out_inf.backward()
assert_shape(out_inf)
self.assertEqual(x8.grad.shape, [3, 3])
a = paddle.randn([2, 4, 4])
a.stop_gradient = False
a_cond_fro = paddle.linalg.cond(a, p='fro')
a_cond_fro.backward()
self.assertEqual(len(a_cond_fro.shape), 1)
self.assertEqual(a.grad.shape, [2, 4, 4])
def test_trace(self):
x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
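The second hunk below adds the matching static-graph checks in `TestSundryAPIStatic`. There, fetched results come back as NumPy arrays, so 0-D outputs show up as shape `()` rather than `[]`. A minimal sketch of that pattern (not part of the diff), using a plain `program_guard` setup instead of the test class's `@prog_scope()` decorator and shared executor; the variable names here are illustrative:

```python
import paddle

paddle.enable_static()
main, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.arange(24, dtype="float32") - 12
    x.stop_gradient = False
    out = paddle.linalg.norm(x)        # fully reduced -> 0-D variable
    paddle.static.append_backward(out)

    exe = paddle.static.Executor()
    exe.run(startup)
    res = exe.run(main, fetch_list=[out, x.grad_name])
    print(res[0].shape)   # () -- NumPy scalar shape, the 0-D case
    print(res[1].shape)   # (24,) -- gradient keeps the input's shape
```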
@@ -4861,103 +4960,198 @@ class TestSundryAPIStatic(unittest.TestCase):
self.assertEqual(res[1].shape, (2, 2))
np.testing.assert_array_equal(res[0], np.array(2).astype(np.float32))
@prog_scope()
def test_linalg_norm(self):
# 1D input, p = fro, axis = None, using reduceInferMeta
x_1 = paddle.arange(24, dtype="float32") - 12
x_1.stop_gradient = False
out_1 = paddle.linalg.norm(x_1)
paddle.static.append_backward(out_1)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_1, x_1.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 1D input, p = 1, axis = None,
# using p_norm, as_vector = True
x_2 = paddle.arange(24, dtype="float32") - 12
x_2.stop_gradient = False
out_2 = paddle.linalg.norm(x_2, p=1)
paddle.static.append_backward(out_2.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2, x_2.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 1D input, p = 1, axis = 0,
# using p_norm, as_vector = False
x_2_p = paddle.arange(24, dtype="float32") - 12
x_2_p.stop_gradient = False
out_2_p = paddle.linalg.norm(x_2_p, p=1, axis=0)
paddle.static.append_backward(out_2_p.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2_p, x_2_p.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 1D input, p = fro, axis = 0,
# using p_norm, as_vector = False
x_2_fro = paddle.arange(24, dtype="float32") - 12
x_2_fro.stop_gradient = False
out_2_fro = paddle.linalg.norm(x_2_fro, p="fro", axis=0)
paddle.static.append_backward(out_2_fro.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2_fro, x_2_fro.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 2D input, p = 1, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_3 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_3.stop_gradient = False
out_3 = paddle.linalg.norm(x_3, p=1, axis=[0, 1])
paddle.static.append_backward(out_3.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_3, x_3.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = 1, axis = None
# using p_matrix_norm, depends on paddle.sum
x_4 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_4.stop_gradient = False
out_4 = paddle.linalg.norm(x_4)
paddle.static.append_backward(out_4.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_4, x_4.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = fro (default), axis = None
x_5 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_5.stop_gradient = False
out_5 = paddle.linalg.norm(x_5)
paddle.static.append_backward(out_5.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_5, x_5.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = -inf, axis = [0, 1]
x_6 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_6.stop_gradient = False
out_6 = paddle.linalg.norm(x_6, p=-float("inf"), axis=[0, 1])
paddle.static.append_backward(out_6.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_6, x_6.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
@prog_scope()
def test_linalg_cond(self):
pass
# use paddle.sum
# x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x.stop_gradient = False
# out = paddle.linalg.cond(x)
# paddle.static.append_backward(out)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out, x.grad_name])
x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x.stop_gradient = False
out = paddle.linalg.cond(x)
paddle.static.append_backward(out)
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out, x.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p = fro : use paddle.sum
# x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x2.stop_gradient = False
# out_fro = paddle.linalg.cond(x2, p='fro')
# paddle.static.append_backward(out_fro)
x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x2.stop_gradient = False
out_fro = paddle.linalg.cond(x2, p='fro')
paddle.static.append_backward(out_fro)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_fro, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_fro, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p = nuc : use paddle.sum
# x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x3.stop_gradient = False
# out_nuc = paddle.linalg.cond(x3, p='nuc')
# paddle.static.append_backward(out_nuc)
x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x3.stop_gradient = False
out_nuc = paddle.linalg.cond(x3, p='nuc')
paddle.static.append_backward(out_nuc)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_nuc, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_nuc, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-1, 1) : use paddle.sum
# x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x4.stop_gradient = False
# out_1 = paddle.linalg.cond(x4, p=1)
# paddle.static.append_backward(out_1)
x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x4.stop_gradient = False
out_1 = paddle.linalg.cond(x4, p=1)
paddle.static.append_backward(out_1)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_1, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_1, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x5.stop_gradient = False
# out_minus_1 = paddle.linalg.cond(x5, p=-1)
# paddle.static.append_backward(out_minus_1)
x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x5.stop_gradient = False
out_minus_1 = paddle.linalg.cond(x5, p=-1)
paddle.static.append_backward(out_minus_1)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_minus_1, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_minus_1, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-2, 2) depends on paddle.sum
# x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x6.stop_gradient = False
# out_2 = paddle.linalg.cond(x6, p=2)
# paddle.static.append_backward(out_2)
x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x6.stop_gradient = False
out_2 = paddle.linalg.cond(x6, p=2)
paddle.static.append_backward(out_2)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_2, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-inf, inf): use paddle.sum
# x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x8.stop_gradient = False
# out_inf = paddle.linalg.cond(x8, p=float("inf"))
# paddle.static.append_backward(out_inf)
x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x8.stop_gradient = False
out_inf = paddle.linalg.cond(x8, p=float("inf"))
paddle.static.append_backward(out_inf)
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_inf, x.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_inf, x.grad_name])
# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# depends on paddle.sum
# a = paddle.randn([2, 4, 4])
# a.stop_gradient = False
# a_cond_fro = paddle.linalg.cond(a, p='fro')
# paddle.static.append_backward(a_cond_fro)
a = paddle.randn([2, 4, 4])
a.stop_gradient = False
a_cond_fro = paddle.linalg.cond(a, p='fro')
paddle.static.append_backward(a_cond_fro.sum())
# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name])
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name])
# self.assertEqual(res[0].shape, (2,))
# self.assertEqual(res[1].shape, (2, 4, 4))
self.assertEqual(res[0].shape, (2,))
self.assertEqual(res[1].shape, (2, 4, 4))
@prog_scope()
def test_trace(self):