diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 5efae3fdd5d6cb4c5d68eb6e6a55ac355e0702db..c7fc518986d3513659268d66292ba6aaa3a7b924 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -18,6 +18,7 @@ import eager_op_test
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
+from eager_op_test import convert_float_to_uint16, convert_uint16_to_float
 
 import paddle
 from paddle import fluid
@@ -47,6 +48,9 @@ class TestAssignOp(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
 class TestAssignFP16Op(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
@@ -69,6 +73,32 @@ class TestAssignFP16Op(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "BFP16 test runs only on GPU"
+)
+class TestAssignBFP16Op(eager_op_test.OpTest):
+    def setUp(self):
+        self.python_api = paddle.assign
+        self.public_python_api = paddle.assign
+        self.op_type = "assign"
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
+        x = convert_float_to_uint16(x)
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        paddle.enable_static()
+        self.check_output()
+        paddle.disable_static()
+
+    def test_backward(self):
+        paddle.enable_static()
+        self.check_grad(['X'], 'Out', check_prim=True)
+        paddle.disable_static()
+
+
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
     def test_assign_LoDTensorArray(self):
         paddle.enable_static()
@@ -160,32 +190,12 @@ class TestAssignOApi(unittest.TestCase):
         paddle.disable_static()
 
     def test_assign_NumpyArray(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.bool_)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray1(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.float32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray2(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray3(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int64)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
+        for dtype in [np.bool_, np.float32, np.int32, np.int64]:
+            with fluid.dygraph.guard():
+                array = np.random.random(size=(100, 10)).astype(dtype)
+                result1 = paddle.zeros(shape=[3, 3], dtype='float32')
+                paddle.assign(array, result1)
+                np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_List(self):
         l = [1, 2, 3]
@@ -232,6 +242,31 @@ class TestAssignOApi(unittest.TestCase):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
+class TestAssignOApiFP16(unittest.TestCase):
+    def test_assign_fp16(self):
+        x = np.random.uniform(0, 10, [3, 3]).astype(np.float16)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='float16')
+        paddle.assign(x, result)
+        np.testing.assert_equal(result.numpy(), x.numpy())
+
+    def test_assign_bfp16(self):
+        x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
+        x = convert_float_to_uint16(x_f)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='bfloat16')
+        paddle.assign(x, result)
+        np.testing.assert_allclose(
+            convert_uint16_to_float(result.numpy()), x_f, rtol=1e-02
+        )
+        np.testing.assert_equal(
+            convert_uint16_to_float(result.numpy()), convert_uint16_to_float(x)
+        )
+
+
 class TestAssignOpErrorApi(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
@@ -284,6 +319,7 @@ class TestAssignDoubleGradCheck(unittest.TestCase):
         places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
 
 
 class TestAssignTripleGradCheck(unittest.TestCase):
@@ -315,6 +351,7 @@ class TestAssignTripleGradCheck(unittest.TestCase):
         places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
 
 
 if __name__ == '__main__':
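
Note on the bf16 round trip exercised above (a minimal sketch, not part of the patch): convert_float_to_uint16 packs float32 values into the uint16 bit pattern that bfloat16 uses, and convert_uint16_to_float widens them back to float32. Because bfloat16 keeps an 8-bit exponent but only about 8 bits of precision, the round trip is close yet lossy, which is why test_assign_bfp16 compares against the original float32 data with a loose rtol=1e-02. Assuming eager_op_test is importable (it lives in the same unittests directory):

    import numpy as np
    from eager_op_test import convert_float_to_uint16, convert_uint16_to_float

    x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
    x_bf16 = convert_float_to_uint16(x_f)     # bfloat16 payload stored as uint16
    x_back = convert_uint16_to_float(x_bf16)  # widened back to float32
    # Lossy but close: matches the tolerance used in test_assign_bfp16.
    np.testing.assert_allclose(x_back, x_f, rtol=1e-02)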