Unverified commit 4f975b41, authored by zhouweiwei2014, committed via GitHub

[Zero-Dim] Support input 0D Tensor for some api (#48007)

Parent commit: 561b7278
...@@ -14,16 +14,20 @@ ...@@ -14,16 +14,20 @@
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from decorator_helper import prog_scope
import paddle.nn.functional as F import paddle.nn.functional as F
import numpy as np import numpy as np
import unittest import unittest
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
unary_api_list = [ unary_api_list = [
paddle.nn.functional.elu, paddle.nn.functional.elu,
paddle.nn.functional.gelu, paddle.nn.functional.gelu,
paddle.nn.functional.hardsigmoid, paddle.nn.functional.hardsigmoid,
paddle.nn.functional.hardswish, paddle.nn.functional.hardswish,
paddle.nn.functional.hardshrink,
paddle.nn.functional.hardtanh,
paddle.nn.functional.leaky_relu, paddle.nn.functional.leaky_relu,
paddle.nn.functional.log_sigmoid, paddle.nn.functional.log_sigmoid,
paddle.nn.functional.relu, paddle.nn.functional.relu,
...@@ -37,9 +41,11 @@ unary_api_list = [ ...@@ -37,9 +41,11 @@ unary_api_list = [
paddle.nn.functional.thresholded_relu, paddle.nn.functional.thresholded_relu,
paddle.stanh, paddle.stanh,
paddle.nn.functional.celu, paddle.nn.functional.celu,
paddle.nn.functional.selu,
paddle.nn.functional.mish, paddle.nn.functional.mish,
paddle.nn.functional.silu, paddle.nn.functional.silu,
paddle.nn.functional.tanh, paddle.nn.functional.tanh,
paddle.nn.functional.dropout,
paddle.cosh, paddle.cosh,
paddle.sinh, paddle.sinh,
paddle.abs, paddle.abs,
...@@ -65,6 +71,24 @@ unary_api_list = [ ...@@ -65,6 +71,24 @@ unary_api_list = [
paddle.log10, paddle.log10,
paddle.log2, paddle.log2,
paddle.tan, paddle.tan,
paddle.erf,
paddle.erfinv,
paddle.rsqrt,
paddle.sign,
paddle.deg2rad,
paddle.rad2deg,
paddle.neg,
paddle.logit,
paddle.trunc,
paddle.digamma,
paddle.lgamma,
paddle.poisson,
paddle.bernoulli,
]
inplace_api_list = [
paddle.nn.functional.relu_,
paddle.nn.functional.tanh_,
] ]
...@@ -72,7 +96,6 @@ unary_api_list = [ ...@@ -72,7 +96,6 @@ unary_api_list = [
class TestUnaryAPI(unittest.TestCase): class TestUnaryAPI(unittest.TestCase):
def test_dygraph_unary(self): def test_dygraph_unary(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
for api in unary_api_list: for api in unary_api_list:
x = paddle.rand([]) x = paddle.rand([])
x.stop_gradient = False x.stop_gradient = False
...@@ -81,8 +104,15 @@ class TestUnaryAPI(unittest.TestCase): ...@@ -81,8 +104,15 @@ class TestUnaryAPI(unittest.TestCase):
self.assertEqual(x.shape, []) self.assertEqual(x.shape, [])
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
self.assertEqual(x.grad.shape, []) if x.grad is not None:
self.assertEqual(out.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(out.grad.shape, [])
for api in inplace_api_list:
x = paddle.rand([])
out = api(x)
self.assertEqual(x.shape, [])
self.assertEqual(out.shape, [])
paddle.enable_static() paddle.enable_static()
...@@ -95,28 +125,32 @@ class TestUnaryAPI(unittest.TestCase): ...@@ -95,28 +125,32 @@ class TestUnaryAPI(unittest.TestCase):
x = paddle.rand([]) x = paddle.rand([])
x.stop_gradient = False x.stop_gradient = False
out = api(x) out = api(x)
fluid.backward.append_backward(out) paddle.static.append_backward(out)
# ScaleLossGradOp / append_backward always set grad shape to [1] # Test compile shape
prog = paddle.static.default_main_program()
block = prog.global_block()
x_grad = block.var(fluid.framework.grad_var_name(x.name))
out_grad = block.var(fluid.framework.grad_var_name(out.name))
# Test compile shape, grad is always [1]
self.assertEqual(x.shape, ()) self.assertEqual(x.shape, ())
self.assertEqual(out.shape, ()) self.assertEqual(out.shape, ())
exe = fluid.Executor() fetch_list = [x, out]
result = exe.run( # TODO(zhouwei): ScaleLossGradOp / append_backward set grad shape to [1]
main_prog, fetch_list=[x, out, x_grad, out_grad] # will change to [] after kernel is fixed
) prog = paddle.static.default_main_program()
block = prog.global_block()
if block.has_var(fluid.framework.grad_var_name(x.name)):
out_grad = block.var(
fluid.framework.grad_var_name(out.name)
)
fetch_list.append(out_grad)
self.assertEqual(out_grad.shape, ())
# Test runtime shape # Test runtime shape
exe = fluid.Executor()
result = exe.run(main_prog, fetch_list=fetch_list)
self.assertEqual(result[0].shape, ()) self.assertEqual(result[0].shape, ())
self.assertEqual(result[1].shape, ()) self.assertEqual(result[1].shape, ())
self.assertEqual(result[3].shape, (1,)) if len(result) == 3:
# TODO(zhouwei): will change to [] after kernel is fixed
self.assertEqual(result[2].shape, (1,))
# 0D will be stacked when 1+ place, due to it cannot be concated # 0D will be stacked when 1+ place, due to it cannot be concated
# for 1 place: [ x-place1 ] # for 1 place: [ x-place1 ]
...@@ -135,28 +169,30 @@ class TestUnaryAPI(unittest.TestCase): ...@@ -135,28 +169,30 @@ class TestUnaryAPI(unittest.TestCase):
).with_data_parallel(out.name, places=places) ).with_data_parallel(out.name, places=places)
result = exe.run( result = exe.run(
compiled_program, compiled_program,
fetch_list=[x, out, x_grad, out_grad], fetch_list=fetch_list,
return_merged=True, return_merged=True,
) )
# Test runtime parallel shape # Test runtime parallel shape
self.assertEqual(result[0].shape, expect_shape) self.assertEqual(result[0].shape, expect_shape)
self.assertEqual(result[1].shape, expect_shape) self.assertEqual(result[1].shape, expect_shape)
self.assertEqual(result[3].shape, (device_num,)) if len(result) == 3:
self.assertEqual(result[2].shape, (device_num,))
compiled_program = fluid.CompiledProgram( compiled_program = fluid.CompiledProgram(
main_prog main_prog
).with_data_parallel(out.name, places=places) ).with_data_parallel(out.name, places=places)
result = exe.run( result = exe.run(
compiled_program, compiled_program,
fetch_list=[x, out, x_grad, out_grad], fetch_list=fetch_list,
return_merged=False, return_merged=False,
) )
# [[x-place1, x-place2, ...], [], [], ...] # [[x-place1, x-place2, ...], [], [], ...]
self.assertEqual(np.array(result[0]).shape, (device_num,)) self.assertEqual(np.array(result[0]).shape, (device_num,))
self.assertEqual(np.array(result[1]).shape, (device_num,)) self.assertEqual(np.array(result[1]).shape, (device_num,))
self.assertEqual(np.array(result[3]).shape, (device_num, 1)) if len(result) == 3:
self.assertEqual(np.array(result[2]).shape, (device_num, 1))
paddle.disable_static() paddle.disable_static()
...@@ -181,7 +217,6 @@ reduce_api_list = [ ...@@ -181,7 +217,6 @@ reduce_api_list = [
class TestReduceAPI(unittest.TestCase): class TestReduceAPI(unittest.TestCase):
def test_dygraph(self): def test_dygraph(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
for api in reduce_api_list: for api in reduce_api_list:
if api in [paddle.all, paddle.any]: if api in [paddle.all, paddle.any]:
x = paddle.randint(0, 2, []).astype('bool') x = paddle.randint(0, 2, []).astype('bool')
...@@ -234,9 +269,6 @@ binary_api_list = [ ...@@ -234,9 +269,6 @@ binary_api_list = [
{'func': paddle.multiply, 'cls_method': '__mul__'}, {'func': paddle.multiply, 'cls_method': '__mul__'},
{'func': paddle.divide, 'cls_method': '__div__'}, {'func': paddle.divide, 'cls_method': '__div__'},
{'func': paddle.pow, 'cls_method': '__pow__'}, {'func': paddle.pow, 'cls_method': '__pow__'},
]
binary_api_list_without_grad = [
{'func': paddle.equal, 'cls_method': '__eq__'}, {'func': paddle.equal, 'cls_method': '__eq__'},
{'func': paddle.not_equal, 'cls_method': '__ne__'}, {'func': paddle.not_equal, 'cls_method': '__ne__'},
{'func': paddle.greater_equal, 'cls_method': '__ge__'}, {'func': paddle.greater_equal, 'cls_method': '__ge__'},
...@@ -251,7 +283,7 @@ binary_api_list_without_grad = [ ...@@ -251,7 +283,7 @@ binary_api_list_without_grad = [
paddle.logical_xor, paddle.logical_xor,
] ]
binary_int_api_list_without_grad = [ binary_int_api_list = [
paddle.bitwise_and, paddle.bitwise_and,
paddle.bitwise_or, paddle.bitwise_or,
paddle.bitwise_xor, paddle.bitwise_xor,
...@@ -262,8 +294,7 @@ binary_int_api_list_without_grad = [ ...@@ -262,8 +294,7 @@ binary_int_api_list_without_grad = [
class TestBinaryAPI(unittest.TestCase): class TestBinaryAPI(unittest.TestCase):
def test_dygraph_binary(self): def test_dygraph_binary(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) for api in binary_api_list:
for api in binary_api_list + binary_api_list_without_grad:
# 1) x/y is 0D # 1) x/y is 0D
x = paddle.rand([]) x = paddle.rand([])
y = paddle.rand([]) y = paddle.rand([])
...@@ -275,10 +306,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -275,10 +306,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(y.grad.shape, []) self.assertEqual(y.grad.shape, [])
self.assertEqual(out.grad.shape, []) self.assertEqual(out.grad.shape, [])
...@@ -294,10 +325,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -294,10 +325,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, [2, 3, 4]) self.assertEqual(out.shape, [2, 3, 4])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, [2, 3, 4]) self.assertEqual(x.grad.shape, [2, 3, 4])
self.assertEqual(y.grad.shape, []) self.assertEqual(y.grad.shape, [])
self.assertEqual(out.grad.shape, [2, 3, 4]) self.assertEqual(out.grad.shape, [2, 3, 4])
...@@ -313,10 +344,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -313,10 +344,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, [2, 3, 4]) self.assertEqual(out.shape, [2, 3, 4])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(y.grad.shape, [2, 3, 4]) self.assertEqual(y.grad.shape, [2, 3, 4])
self.assertEqual(out.grad.shape, [2, 3, 4]) self.assertEqual(out.grad.shape, [2, 3, 4])
...@@ -329,7 +360,7 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -329,7 +360,7 @@ class TestBinaryAPI(unittest.TestCase):
out = getattr(paddle.Tensor, api['cls_method'])(x, y) out = getattr(paddle.Tensor, api['cls_method'])(x, y)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
for api in binary_int_api_list_without_grad: for api in binary_int_api_list:
# 1) x/y is 0D # 1) x/y is 0D
x = paddle.randint(-10, 10, []) x = paddle.randint(-10, 10, [])
y = paddle.randint(-10, 10, []) y = paddle.randint(-10, 10, [])
...@@ -352,7 +383,7 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -352,7 +383,7 @@ class TestBinaryAPI(unittest.TestCase):
def test_static_binary(self): def test_static_binary(self):
paddle.enable_static() paddle.enable_static()
for api in binary_api_list + binary_api_list_without_grad: for api in binary_api_list:
main_prog = fluid.Program() main_prog = fluid.Program()
with fluid.program_guard(main_prog, fluid.Program()): with fluid.program_guard(main_prog, fluid.Program()):
# 1) x/y is 0D # 1) x/y is 0D
...@@ -368,16 +399,15 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -368,16 +399,15 @@ class TestBinaryAPI(unittest.TestCase):
self.assertEqual(out.shape, out_cls.shape) self.assertEqual(out.shape, out_cls.shape)
else: else:
out = api(x, y) out = api(x, y)
fluid.backward.append_backward(out) paddle.static.append_backward(out)
# Test compile shape
self.assertEqual(out.shape, ()) self.assertEqual(out.shape, ())
exe = fluid.Executor() exe = fluid.Executor()
out_np = exe.run(main_prog, fetch_list=[out])[0] result = exe.run(main_prog, fetch_list=[out])
# Test runtime shape self.assertEqual(result[0].shape, ())
self.assertEqual(out_np.shape, ())
# TODO(zhouwei): will open when create_scalar is [] # TODO: will open when create_scalar is []
# 2) x is 0D , y is scalar # 2) x is 0D , y is scalar
''' '''
x = paddle.rand([]) x = paddle.rand([])
...@@ -391,7 +421,7 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -391,7 +421,7 @@ class TestBinaryAPI(unittest.TestCase):
self.assertEqual(out.shape, ()) self.assertEqual(out.shape, ())
''' '''
for api in binary_int_api_list_without_grad: for api in binary_int_api_list:
main_prog = fluid.Program() main_prog = fluid.Program()
with fluid.program_guard(main_prog, fluid.Program()): with fluid.program_guard(main_prog, fluid.Program()):
# 1) x/y is 0D # 1) x/y is 0D
...@@ -415,10 +445,11 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -415,10 +445,11 @@ class TestBinaryAPI(unittest.TestCase):
paddle.disable_static() paddle.disable_static()
# Use to test zero-dim of Sundry API, which is simple and do # Use to test zero-dim of Sundry API, which is unique and can not be classified
# not have backward, or is not need to test backward in OpTest. # with others. It can be implemented here flexibly.
class TestSundryAPI(unittest.TestCase): class TestSundryAPI(unittest.TestCase):
def setUp(self): def setUp(self):
paddle.disable_static()
self.x = paddle.rand([]) self.x = paddle.rand([])
def test_linear(self): def test_linear(self):
...@@ -501,6 +532,130 @@ class TestSundryAPI(unittest.TestCase): ...@@ -501,6 +532,130 @@ class TestSundryAPI(unittest.TestCase):
self.assertEqual(out.shape, [0]) self.assertEqual(out.shape, [0])
np.testing.assert_array_equal(out.numpy(), np.array([])) np.testing.assert_array_equal(out.numpy(), np.array([]))
def test_pow_factor(self):
    """paddle.pow with a Python-scalar exponent keeps a 0D input 0D, grads included."""
    base = paddle.rand([])
    base.stop_gradient = False
    powered = paddle.pow(base, 2.0)
    powered.backward()

    # Output and both gradients must remain zero-dimensional.
    self.assertEqual(powered.shape, [])
    self.assertEqual(powered.grad.shape, [])
    self.assertEqual(base.grad.shape, [])
def test_cast(self):
    """paddle.cast preserves 0D shape on the output and on the gradients."""
    scalar = paddle.full([], 1.0, 'float32')
    scalar.stop_gradient = False
    casted = paddle.cast(scalar, 'int32')
    casted.backward()

    self.assertEqual(casted.shape, [])
    self.assertEqual(casted.grad.shape, [])
    self.assertEqual(scalar.grad.shape, [])
def test_clip(self):
    """paddle.clip on a 0D input yields a 0D output and 0D gradients."""
    sample = paddle.uniform([], None, -10, 10)
    sample.stop_gradient = False
    clipped = paddle.clip(sample, -5, 5)
    clipped.backward()

    self.assertEqual(clipped.shape, [])
    self.assertEqual(clipped.grad.shape, [])
    self.assertEqual(sample.grad.shape, [])
def test_increment(self):
    """paddle.increment keeps the 0D shape through forward and backward."""
    counter = paddle.rand([])
    counter.stop_gradient = False
    bumped = paddle.increment(counter, 1.0)
    bumped.backward()

    self.assertEqual(bumped.shape, [])
    self.assertEqual(bumped.grad.shape, [])
    self.assertEqual(counter.grad.shape, [])
def test_bitwise_not(self):
    """Both the ~ operator and paddle.bitwise_not return 0D for a 0D input."""
    operand = paddle.randint(-1, 1, [])
    via_operator = ~operand
    via_api = paddle.bitwise_not(operand)

    self.assertEqual(via_operator.shape, [])
    self.assertEqual(via_api.shape, [])
def test_logical_not(self):
    """paddle.logical_not of a 0D input is 0D."""
    flag = paddle.randint(0, 1, [])
    negated = paddle.logical_not(flag)
    self.assertEqual(negated.shape, [])
# Static-graph counterparts of the dygraph sundry tests. Each case builds a
# tiny program around one API on a 0D input, appends backward, and checks the
# fetched result has runtime shape ().
class TestSundryAPIStatic(unittest.TestCase):
    def setUp(self):
        paddle.enable_static()
        self.exe = paddle.static.Executor()

    def _check_static_scalar(self, out):
        """Append backward for `out`, run the default program, assert the fetched value is 0D."""
        paddle.static.append_backward(out)
        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_pow_factor(self):
        x = paddle.rand([])
        x.stop_gradient = False
        self._check_static_scalar(paddle.pow(x, 2.0))

    @prog_scope()
    def test_cast(self):
        x = paddle.full([], 1.0, 'float32')
        x.stop_gradient = False
        self._check_static_scalar(paddle.cast(x, 'int32'))

    @prog_scope()
    def test_clip(self):
        x = paddle.uniform([], None, -10, 10)
        x.stop_gradient = False
        self._check_static_scalar(paddle.clip(x, -5, 5))

    @prog_scope()
    def test_increment(self):
        x = paddle.rand([])
        x.stop_gradient = False
        self._check_static_scalar(paddle.increment(x, 1.0))

    @prog_scope()
    def test_bitwise_not(self):
        x = paddle.randint(-1, 1, [])
        self._check_static_scalar(paddle.bitwise_not(x))

    @prog_scope()
    def test_logical_not(self):
        x = paddle.randint(0, 1, [])
        self._check_static_scalar(paddle.logical_not(x))
# Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest. # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest.
class TestNoBackwardAPI(unittest.TestCase): class TestNoBackwardAPI(unittest.TestCase):
......
...@@ -20,12 +20,15 @@ import unittest ...@@ -20,12 +20,15 @@ import unittest
paddle.set_device('xpu') paddle.set_device('xpu')
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
unary_api_list = [ unary_api_list = [
paddle.nn.functional.elu, paddle.nn.functional.elu,
paddle.nn.functional.gelu, paddle.nn.functional.gelu,
paddle.nn.functional.hardsigmoid, paddle.nn.functional.hardsigmoid,
paddle.nn.functional.hardswish, paddle.nn.functional.hardswish,
paddle.nn.functional.hardshrink,
paddle.nn.functional.hardtanh,
paddle.nn.functional.leaky_relu, paddle.nn.functional.leaky_relu,
paddle.nn.functional.log_sigmoid, paddle.nn.functional.log_sigmoid,
paddle.nn.functional.relu, paddle.nn.functional.relu,
...@@ -39,9 +42,11 @@ unary_api_list = [ ...@@ -39,9 +42,11 @@ unary_api_list = [
paddle.nn.functional.thresholded_relu, paddle.nn.functional.thresholded_relu,
paddle.stanh, paddle.stanh,
paddle.nn.functional.celu, paddle.nn.functional.celu,
paddle.nn.functional.selu,
paddle.nn.functional.mish, paddle.nn.functional.mish,
paddle.nn.functional.silu, paddle.nn.functional.silu,
paddle.nn.functional.tanh, paddle.nn.functional.tanh,
paddle.nn.functional.dropout,
paddle.cosh, paddle.cosh,
paddle.sinh, paddle.sinh,
paddle.abs, paddle.abs,
...@@ -67,14 +72,31 @@ unary_api_list = [ ...@@ -67,14 +72,31 @@ unary_api_list = [
paddle.log10, paddle.log10,
paddle.log2, paddle.log2,
paddle.tan, paddle.tan,
paddle.erf,
paddle.erfinv,
paddle.rsqrt,
paddle.sign,
paddle.deg2rad,
paddle.rad2deg,
paddle.neg,
paddle.logit,
paddle.trunc,
paddle.digamma,
paddle.lgamma,
paddle.poisson,
paddle.bernoulli,
]
inplace_api_list = [
paddle.nn.functional.relu_,
paddle.nn.functional.tanh_,
] ]
# Use to test zero-dim in unary API. # Use to test zero-dim in unary API.
class TestUnaryAPI(unittest.TestCase): class TestUnaryAPI(unittest.TestCase):
def test(self): def test_dygraph_unary(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
for api in unary_api_list: for api in unary_api_list:
x = paddle.rand([]) x = paddle.rand([])
x.stop_gradient = False x.stop_gradient = False
...@@ -83,8 +105,15 @@ class TestUnaryAPI(unittest.TestCase): ...@@ -83,8 +105,15 @@ class TestUnaryAPI(unittest.TestCase):
self.assertEqual(x.shape, []) self.assertEqual(x.shape, [])
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
self.assertEqual(x.grad.shape, []) if x.grad is not None:
self.assertEqual(out.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(out.grad.shape, [])
for api in inplace_api_list:
x = paddle.rand([])
out = api(x)
self.assertEqual(x.shape, [])
self.assertEqual(out.shape, [])
paddle.enable_static() paddle.enable_static()
...@@ -107,9 +136,8 @@ reduce_api_list = [ ...@@ -107,9 +136,8 @@ reduce_api_list = [
# Use to test zero-dim of reduce API # Use to test zero-dim of reduce API
class TestReduceAPI(unittest.TestCase): class TestReduceAPI(unittest.TestCase):
def test(self): def test_dygraph(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
for api in reduce_api_list: for api in reduce_api_list:
if api in [paddle.all, paddle.any]: if api in [paddle.all, paddle.any]:
x = paddle.randint(0, 2, []).astype('bool') x = paddle.randint(0, 2, []).astype('bool')
...@@ -136,9 +164,6 @@ binary_api_list = [ ...@@ -136,9 +164,6 @@ binary_api_list = [
{'func': paddle.multiply, 'cls_method': '__mul__'}, {'func': paddle.multiply, 'cls_method': '__mul__'},
{'func': paddle.divide, 'cls_method': '__div__'}, {'func': paddle.divide, 'cls_method': '__div__'},
{'func': paddle.pow, 'cls_method': '__pow__'}, {'func': paddle.pow, 'cls_method': '__pow__'},
]
binary_api_list_without_grad = [
{'func': paddle.equal, 'cls_method': '__eq__'}, {'func': paddle.equal, 'cls_method': '__eq__'},
{'func': paddle.not_equal, 'cls_method': '__ne__'}, {'func': paddle.not_equal, 'cls_method': '__ne__'},
{'func': paddle.greater_equal, 'cls_method': '__ge__'}, {'func': paddle.greater_equal, 'cls_method': '__ge__'},
...@@ -153,7 +178,7 @@ binary_api_list_without_grad = [ ...@@ -153,7 +178,7 @@ binary_api_list_without_grad = [
paddle.logical_xor, paddle.logical_xor,
] ]
binary_int_api_list_without_grad = [ binary_int_api_list = [
paddle.bitwise_and, paddle.bitwise_and,
paddle.bitwise_or, paddle.bitwise_or,
paddle.bitwise_xor, paddle.bitwise_xor,
...@@ -162,10 +187,9 @@ binary_int_api_list_without_grad = [ ...@@ -162,10 +187,9 @@ binary_int_api_list_without_grad = [
# Use to test zero-dim of binary API # Use to test zero-dim of binary API
class TestBinaryAPI(unittest.TestCase): class TestBinaryAPI(unittest.TestCase):
def test(self): def test_dygraph_binary(self):
paddle.disable_static() paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) for api in binary_api_list:
for api in binary_api_list + binary_api_list_without_grad:
# 1) x/y is 0D # 1) x/y is 0D
x = paddle.rand([]) x = paddle.rand([])
y = paddle.rand([]) y = paddle.rand([])
...@@ -177,10 +201,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -177,10 +201,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(y.grad.shape, []) self.assertEqual(y.grad.shape, [])
self.assertEqual(out.grad.shape, []) self.assertEqual(out.grad.shape, [])
...@@ -196,10 +220,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -196,10 +220,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, [2, 3, 4]) self.assertEqual(out.shape, [2, 3, 4])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, [2, 3, 4]) self.assertEqual(x.grad.shape, [2, 3, 4])
self.assertEqual(y.grad.shape, []) self.assertEqual(y.grad.shape, [])
self.assertEqual(out.grad.shape, [2, 3, 4]) self.assertEqual(out.grad.shape, [2, 3, 4])
...@@ -215,10 +239,10 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -215,10 +239,10 @@ class TestBinaryAPI(unittest.TestCase):
np.testing.assert_array_equal(out_cls.numpy(), out.numpy()) np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
else: else:
out = api(x, y) out = api(x, y)
self.assertEqual(out.shape, [2, 3, 4]) self.assertEqual(out.shape, [2, 3, 4])
if api not in binary_api_list_without_grad:
out.backward() out.backward()
if x.grad is not None:
self.assertEqual(x.grad.shape, []) self.assertEqual(x.grad.shape, [])
self.assertEqual(y.grad.shape, [2, 3, 4]) self.assertEqual(y.grad.shape, [2, 3, 4])
self.assertEqual(out.grad.shape, [2, 3, 4]) self.assertEqual(out.grad.shape, [2, 3, 4])
...@@ -231,7 +255,7 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -231,7 +255,7 @@ class TestBinaryAPI(unittest.TestCase):
out = getattr(paddle.Tensor, api['cls_method'])(x, y) out = getattr(paddle.Tensor, api['cls_method'])(x, y)
self.assertEqual(out.shape, []) self.assertEqual(out.shape, [])
for api in binary_int_api_list_without_grad: for api in binary_int_api_list:
# 1) x/y is 0D # 1) x/y is 0D
x = paddle.randint(-10, 10, []) x = paddle.randint(-10, 10, [])
y = paddle.randint(-10, 10, []) y = paddle.randint(-10, 10, [])
...@@ -253,8 +277,8 @@ class TestBinaryAPI(unittest.TestCase): ...@@ -253,8 +277,8 @@ class TestBinaryAPI(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
# Use to test zero-dim of Sundry API, which is simple and do # Use to test zero-dim of Sundry API, which is unique and can not be classified
# not have backward, or is not need to test backward in OpTest. # with others. It can be implemented here flexibly.
class TestSundryAPI(unittest.TestCase): class TestSundryAPI(unittest.TestCase):
def setUp(self): def setUp(self):
paddle.disable_static() paddle.disable_static()
...@@ -336,6 +360,190 @@ class TestSundryAPI(unittest.TestCase): ...@@ -336,6 +360,190 @@ class TestSundryAPI(unittest.TestCase):
self.assertEqual(out.shape, [0]) self.assertEqual(out.shape, [0])
np.testing.assert_array_equal(out.numpy(), np.array([])) np.testing.assert_array_equal(out.numpy(), np.array([]))
def test_pow_factor(self):
    """paddle.pow with a scalar exponent keeps a 0D input 0D, grads included (XPU)."""
    base = paddle.rand([])
    base.stop_gradient = False
    squared = paddle.pow(base, 2.0)
    squared.backward()

    self.assertEqual(squared.shape, [])
    self.assertEqual(squared.grad.shape, [])
    self.assertEqual(base.grad.shape, [])
def test_cast(self):
    """paddle.cast preserves 0D shape for output and gradients (XPU)."""
    source = paddle.full([], 1.0, 'float32')
    source.stop_gradient = False
    converted = paddle.cast(source, 'int32')
    converted.backward()

    self.assertEqual(converted.shape, [])
    self.assertEqual(converted.grad.shape, [])
    self.assertEqual(source.grad.shape, [])
def test_clip(self):
    """paddle.clip on a 0D input yields 0D output and gradients (XPU)."""
    value = paddle.uniform([], None, -10, 10)
    value.stop_gradient = False
    bounded = paddle.clip(value, -5, 5)
    bounded.backward()

    self.assertEqual(bounded.shape, [])
    self.assertEqual(bounded.grad.shape, [])
    self.assertEqual(value.grad.shape, [])
def test_increment(self):
    """paddle.increment keeps 0D shape through forward and backward (XPU)."""
    tick = paddle.rand([])
    tick.stop_gradient = False
    advanced = paddle.increment(tick, 1.0)
    advanced.backward()

    self.assertEqual(advanced.shape, [])
    self.assertEqual(advanced.grad.shape, [])
    self.assertEqual(tick.grad.shape, [])
def test_bitwise_not(self):
    """Both ~ and paddle.bitwise_not return 0D for a 0D input (XPU)."""
    bits = paddle.randint(-1, 1, [])
    negated_op = ~bits
    negated_fn = paddle.bitwise_not(bits)

    self.assertEqual(negated_op.shape, [])
    self.assertEqual(negated_fn.shape, [])
def test_logical_not(self):
    """paddle.logical_not of a 0D input is 0D (XPU)."""
    cond = paddle.randint(0, 1, [])
    inverted = paddle.logical_not(cond)
    self.assertEqual(inverted.shape, [])
# Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest.
class TestNoBackwardAPI(unittest.TestCase):
    """Forward-only APIs must accept 0D tensors, both as direct inputs and as
    the elements of a shape list; no backward is exercised here."""

    def setUp(self):
        paddle.disable_static()
        # A [2, 3, 4] target shape expressed as a list of 0D int32 tensors.
        self.shape = [paddle.full([], dim, 'int32') for dim in (2, 3, 4)]

    def test_slice(self):
        starts = [paddle.full([], 1, 'int32') for _ in range(2)]
        ends = [paddle.full([], 3, 'int32') for _ in range(2)]
        inp = paddle.rand([5, 3, 3])
        sliced = paddle.slice(inp, [1, 2], starts, ends)
        self.assertEqual(sliced.shape, [5, 2, 2])

    def test_strided_slice(self):
        starts = [paddle.full([], 0, 'int32') for _ in range(2)]
        ends = [paddle.full([], 4, 'int32') for _ in range(2)]
        strides = [paddle.full([], 2, 'int32') for _ in range(2)]
        inp = paddle.rand([5, 5, 5])
        sliced = paddle.strided_slice(inp, [1, 2], starts, ends, strides)
        self.assertEqual(sliced.shape, [5, 2, 2])

    def test_linspace(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 5.0)
        num = paddle.full([], 5, 'int32')
        result = paddle.linspace(start, stop, num)
        np.testing.assert_array_equal(result.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_arange(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 6.0)
        step = paddle.full([], 1.0)
        result = paddle.arange(start, stop, step)
        np.testing.assert_array_equal(result.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_normal(self):
        mean = paddle.full([], 0.0)
        std = paddle.full([], 0.0)
        self.assertEqual(paddle.normal(mean, std).shape, [])
        self.assertEqual(paddle.normal(0.0, 1.0, []).shape, [])
        self.assertEqual(paddle.normal(0.0, 1.0, self.shape).shape, [2, 3, 4])

    def test_rand(self):
        self.assertEqual(paddle.rand([]).shape, [])
        self.assertEqual(paddle.rand(self.shape).shape, [2, 3, 4])

    def test_randn(self):
        self.assertEqual(paddle.randn([]).shape, [])
        self.assertEqual(paddle.randn(self.shape).shape, [2, 3, 4])

    def test_randint_and_randint_like(self):
        zero_dim = paddle.randint(-10, 10, [])
        self.assertEqual(zero_dim.shape, [])
        self.assertEqual(paddle.randint_like(zero_dim, -10, 10).shape, [])
        self.assertEqual(paddle.randint(-10, 10, self.shape).shape, [2, 3, 4])

    def test_standard_normal(self):
        self.assertEqual(paddle.standard_normal([]).shape, [])
        self.assertEqual(paddle.standard_normal(self.shape).shape, [2, 3, 4])

    def test_uniform(self):
        self.assertEqual(paddle.uniform([]).shape, [])
        self.assertEqual(paddle.uniform(self.shape).shape, [2, 3, 4])

    def test_empty_and_empty_like(self):
        zero_dim = paddle.empty([])
        self.assertEqual(zero_dim.shape, [])
        self.assertEqual(paddle.empty_like(zero_dim).shape, [])
        self.assertEqual(paddle.empty(self.shape).shape, [2, 3, 4])

    def test_full_and_full_like(self):
        zero_dim = paddle.full([], 0.5)
        self.assertEqual(zero_dim.shape, [])
        self.assertEqual(paddle.full_like(zero_dim, 0.5).shape, [])
        self.assertEqual(paddle.full(self.shape, 0.5).shape, [2, 3, 4])

    def test_ones_and_ones_like(self):
        zero_dim = paddle.ones([])
        self.assertEqual(zero_dim.shape, [])
        self.assertEqual(paddle.ones_like(zero_dim).shape, [])
        self.assertEqual(paddle.ones(self.shape).shape, [2, 3, 4])

    def test_zeros_and_zeros_like(self):
        zero_dim = paddle.zeros([])
        self.assertEqual(zero_dim.shape, [])
        self.assertEqual(paddle.zeros_like(zero_dim).shape, [])
        self.assertEqual(paddle.zeros(self.shape).shape, [2, 3, 4])
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in.