Unverified commit 7edcd05c, authored by GGBond8488, committed by GitHub

add and open 0D test pnorm and cond (#53616)

Parent c27e6d2f
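Background for the tests below: with 0D-tensor support, a full reduction through paddle.linalg.norm and the condition number of a single matrix from paddle.linalg.cond are expected to come back as 0D tensors (shape []), while the gradients keep the input's shape. The following is a minimal sketch of that behavior, assuming an eager-mode Paddle build with 0D outputs enabled; it is an illustration, not part of the commit.

import paddle

# Full reduction: norm of a 1D tensor over all elements -> 0D output.
x = paddle.arange(24, dtype="float32") - 12
x.stop_gradient = False
out = paddle.linalg.norm(x, p=2)
out.backward()
print(out.shape)     # [] (0D), not [1]
print(x.grad.shape)  # [24], gradient keeps the input shape

# Condition number of a single matrix -> 0D output.
m = paddle.to_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
m.stop_gradient = False
cond = paddle.linalg.cond(m, p='fro')
cond.backward()
print(cond.shape)    # []
print(m.grad.shape)  # [3, 3]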
@@ -2478,51 +2478,156 @@ class TestSundryAPI(unittest.TestCase):
np.testing.assert_allclose(out, np.array(12))
self.assertEqual(x.grad.shape, [2, 2])
def test_linalg_norm(self):
# 1D input, p = fro, axis = None, using reduceInferMeta
x_1 = paddle.arange(24, dtype="float32") - 12
x_1.stop_gradient = False
out_1 = paddle.linalg.norm(x_1)
out_1.retain_grads()
out_1.backward()
self.assertEqual(out_1.shape, [])
self.assertEqual(x_1.grad.shape, [24])
# 1D input, p = 1, axis = None,
# using p_norm, as_vector = True
x_2 = paddle.arange(24, dtype="float32") - 12
x_2.stop_gradient = False
out_2 = paddle.linalg.norm(x_2, p=1)
out_2.retain_grads()
out_2.backward()
self.assertEqual(out_2.shape, [])
self.assertEqual(x_2.grad.shape, [24])
# 1D input, p = 1, axis = 0,
# using p_norm, as_vector = False
x_2_p = paddle.arange(24, dtype="float32") - 12
x_2_p.stop_gradient = False
out_2_p = paddle.linalg.norm(x_2_p, p=1, axis=0)
out_2_p.retain_grads()
out_2_p.backward()
self.assertEqual(out_2_p.shape, [])
self.assertEqual(x_2_p.grad.shape, [24])
# 1D input, p = fro, axis = 0,
# using p_norm, as_vector = False
x_2_fro = paddle.arange(24, dtype="float32") - 12
x_2_fro.stop_gradient = False
out_2_fro = paddle.linalg.norm(x_2_fro, p="fro", axis=0)
out_2_fro.retain_grads()
out_2_fro.backward()
self.assertEqual(out_2_fro.shape, [])
self.assertEqual(x_2_fro.grad.shape, [24])
# 2D input, p = 1, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_3 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_3.stop_gradient = False
out_3 = paddle.linalg.norm(x_3, p=1, axis=[0, 1])
out_3.retain_grads()
out_3.backward()
self.assertEqual(out_3.shape, [])
self.assertEqual(x_3.grad.shape, [4, 6])
# 2D input, p = 1, axis = None
# using p_matrix_norm, depends on paddle.sum
x_4 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_4.stop_gradient = False
out_4 = paddle.linalg.norm(x_4)
out_4.retain_grads()
out_4.backward()
self.assertEqual(out_4.shape, [])
self.assertEqual(x_4.grad.shape, [4, 6])
# 2D input, p = 2, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_5 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_5.stop_gradient = False
out_5 = paddle.linalg.norm(x_5, p=2, axis=[0, 1])
out_5.retain_grads()
out_5.backward()
self.assertEqual(out_5.shape, [])
self.assertEqual(x_5.grad.shape, [4, 6])
# 2D input, p = -inf, axis = [0, 1]
x_6 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_6.stop_gradient = False
out_6 = paddle.linalg.norm(x_6, p=-float("inf"), axis=[0, 1])
out_6.retain_grads()
out_6.backward()
self.assertEqual(out_6.shape, [])
self.assertEqual(x_6.grad.shape, [4, 6])
def test_linalg_cond(self):
def assert_shape(out):
self.assertEqual(out.shape, [])
x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x1.stop_gradient = False
# p = 2 : use paddle.sum
out = paddle.linalg.cond(x1)
out.backward()
assert_shape(out)
self.assertEqual(x1.grad.shape, [3, 3])
# p = fro : use paddle.sum
x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x2.stop_gradient = False
out_fro = paddle.linalg.cond(x2, p='fro')
out_fro.backward()
assert_shape(out_fro)
self.assertEqual(x2.grad.shape, [3, 3])
# p = nuc : use paddle.sum
x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x3.stop_gradient = False
out_nuc = paddle.linalg.cond(x3, p='nuc')
out_nuc.backward()
assert_shape(out_nuc)
self.assertEqual(x3.grad.shape, [3, 3])

# p in (-1, 1) : use paddle.sum
x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x4.stop_gradient = False
out_1 = paddle.linalg.cond(x4, p=1)
out_1.backward()
assert_shape(out_1)
self.assertEqual(x4.grad.shape, [3, 3])

x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x5.stop_gradient = False
out_minus_1 = paddle.linalg.cond(x5, p=-1)
out_minus_1.backward()
assert_shape(out_minus_1)
self.assertEqual(x5.grad.shape, [3, 3])

# p in (-2, 2) depends on paddle.sum
x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x6.stop_gradient = False
out_2 = paddle.linalg.cond(x6, p=2)
out_2.backward()
assert_shape(out_2)
self.assertEqual(x6.grad.shape, [3, 3])
# p in (-inf, inf):use paddle.sum
x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x8.stop_gradient = False
out_inf = paddle.linalg.cond(x8, p=float("inf"))
out_inf.backward()
assert_shape(out_inf)
self.assertEqual(x8.grad.shape, [3, 3])
a = paddle.randn([2, 4, 4])
a.stop_gradient = False
a_cond_fro = paddle.linalg.cond(a, p='fro')
a_cond_fro.backward()
self.assertEqual(len(a_cond_fro.shape), 1)
self.assertEqual(a.grad.shape, [2, 4, 4])
def test_cov(self):
xt = paddle.randn((3, 4))
@@ -4549,20 +4654,197 @@ class TestSundryAPIStatic(unittest.TestCase):
np.testing.assert_allclose(res[0], np.array(12))
@prog_scope()
def test_linalg_norm(self):
# 1D input, p = fro, axis = None, using reduceInferMeta
x_1 = paddle.arange(24, dtype="float32") - 12
x_1.stop_gradient = False
out_1 = paddle.linalg.norm(x_1)
paddle.static.append_backward(out_1)

prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_1, x_1.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))

# 1D input, p = 1, axis = None,
# using p_norm, as_vector = True
x_2 = paddle.arange(24, dtype="float32") - 12
x_2.stop_gradient = False
out_2 = paddle.linalg.norm(x_2, p=1)
paddle.static.append_backward(out_2.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2, x_2.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 1D input, p = 1, axis = 0,
# using p_norm, as_vector = False
x_2_p = paddle.arange(24, dtype="float32") - 12
x_2_p.stop_gradient = False
out_2_p = paddle.linalg.norm(x_2_p, p=1, axis=0)
paddle.static.append_backward(out_2_p.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2_p, x_2_p.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 1D input, p = fro, axis = 0,
# using p_norm, as_vector = False
x_2_fro = paddle.arange(24, dtype="float32") - 12
x_2_fro.stop_gradient = False
out_2_fro = paddle.linalg.norm(x_2_fro, p="fro", axis=0)
paddle.static.append_backward(out_2_fro.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2_fro, x_2_fro.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (24,))
# 2D input, p = 1, axis = [0, 1]
# using p_matrix_norm, depends on paddle.sum
x_3 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_3.stop_gradient = False
out_3 = paddle.linalg.norm(x_3, p=1, axis=[0, 1])
paddle.static.append_backward(out_3.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_3, x_3.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = 1, axis = None
# using p_matrix_norm, depends on paddle.sum
x_4 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_4.stop_gradient = False
out_4 = paddle.linalg.norm(x_4)
paddle.static.append_backward(out_4.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_4, x_4.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = inf, axis = None
x_5 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_5.stop_gradient = False
out_5 = paddle.linalg.norm(x_5)
paddle.static.append_backward(out_5.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_5, x_5.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
# 2D input, p = -inf, axis = [0, 1]
x_6 = paddle.arange(24, dtype="float32").reshape([4, 6])
x_6.stop_gradient = False
out_6 = paddle.linalg.norm(x_6, p=-float("inf"), axis=[0, 1])
paddle.static.append_backward(out_6.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_6, x_6.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (4, 6))
@prog_scope()
def test_linalg_cond(self):
# use paddle.sum
x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x.stop_gradient = False
out = paddle.linalg.cond(x)
paddle.static.append_backward(out)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out, x.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p = fro : use paddle.sum
x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x2.stop_gradient = False
out_fro = paddle.linalg.cond(x2, p='fro')
paddle.static.append_backward(out_fro)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_fro, x2.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p = nuc : use paddle.sum
x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x3.stop_gradient = False
out_nuc = paddle.linalg.cond(x3, p='nuc')
paddle.static.append_backward(out_nuc)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_nuc, x3.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-1, 1) : use paddle.sum
x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x4.stop_gradient = False
out_1 = paddle.linalg.cond(x4, p=1)
paddle.static.append_backward(out_1)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_1, x4.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x5.stop_gradient = False
out_minus_1 = paddle.linalg.cond(x5, p=-1)
paddle.static.append_backward(out_minus_1)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_minus_1, x5.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-2, 2) depends on paddle.sum
x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x6.stop_gradient = False
out_2 = paddle.linalg.cond(x6, p=2)
paddle.static.append_backward(out_2)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_2, x6.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# p in (-inf, inf):use paddle.sum
x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
x8.stop_gradient = False
out_inf = paddle.linalg.cond(x8, p=float("inf"))
paddle.static.append_backward(out_inf)
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out_inf, x8.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))
# depends on paddle.sum
a = paddle.randn([2, 4, 4])
a.stop_gradient = False
a_cond_fro = paddle.linalg.cond(a, p='fro')
paddle.static.append_backward(a_cond_fro.sum())
prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name])
self.assertEqual(res[0].shape, (2,))
self.assertEqual(res[1].shape, (2, 4, 4))
@prog_scope()
def test_cov(self):
...