Unverified · Commit 41f0e3c3 authored by zhenhailiu, committed by GitHub

[AMP OP&Test] assign op: add fp16 and bf16 tests (#52233)

* add fp16 and bf16 tests

* polish

* polish

* polish
Parent 3161e6c3
@@ -18,6 +18,7 @@ import eager_op_test
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
+from eager_op_test import convert_float_to_uint16, convert_uint16_to_float
 import paddle
 from paddle import fluid
@@ -47,6 +48,9 @@ class TestAssignOp(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
 class TestAssignFP16Op(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
@@ -69,6 +73,32 @@ class TestAssignFP16Op(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "BFP16 test runs only on GPU"
+)
+class TestAssignBFP16Op(eager_op_test.OpTest):
+    def setUp(self):
+        self.python_api = paddle.assign
+        self.public_python_api = paddle.assign
+        self.op_type = "assign"
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
+        x = convert_float_to_uint16(x)
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        paddle.enable_static()
+        self.check_output()
+        paddle.disable_static()
+
+    def test_backward(self):
+        paddle.enable_static()
+        self.check_grad(['X'], 'Out', check_prim=True)
+        paddle.disable_static()
+
+
 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
     def test_assign_LoDTensorArray(self):
         paddle.enable_static()
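Note on the data setup in TestAssignBFP16Op above: numpy has no native bfloat16 dtype, so the OpTest stores bf16 values as uint16 bit patterns via convert_float_to_uint16. A minimal sketch of the round-trip involved, assuming plain truncation (the real helper in eager_op_test also rounds): bfloat16 is just the top 16 bits of an IEEE-754 float32.

    import numpy as np

    # Sketch only: emulate bfloat16 as uint16, as the test data does.
    # Assumption: plain truncation of the low 16 mantissa bits.
    def to_bf16_bits(x):
        return (np.asarray(x, np.float32).view(np.uint32) >> 16).astype(np.uint16)

    def from_bf16_bits(bits):
        return (np.asarray(bits, np.uint16).astype(np.uint32) << 16).view(np.float32)

    x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
    bits = to_bf16_bits(x)         # plays the role of self.inputs['X']
    x_back = from_bf16_bits(bits)
    # Only ~7 explicit mantissa bits survive, so the round-trip error is
    # bounded by one bf16 ulp, i.e. a relative error below 2 ** -7.
    assert np.all(np.abs(x_back - x) <= np.abs(x) * 2.0 ** -7)

Because 'X' and 'Out' hold the identical uint16 array, check_output can still compare exactly; the precision loss only matters when comparing against the original float32 data, as in the API tests further down.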
@@ -160,32 +190,12 @@ class TestAssignOApi(unittest.TestCase):
         paddle.disable_static()
 
     def test_assign_NumpyArray(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.bool_)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray1(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.float32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray2(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int32)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
-
-    def test_assign_NumpyArray3(self):
-        with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.int64)
-            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
-            paddle.assign(array, result1)
-            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
+        for dtype in [np.bool_, np.float32, np.int32, np.int64]:
+            with fluid.dygraph.guard():
+                array = np.random.random(size=(100, 10)).astype(dtype)
+                result1 = paddle.zeros(shape=[3, 3], dtype='float32')
+                paddle.assign(array, result1)
+                np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_List(self):
         l = [1, 2, 3]
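The four near-identical NumpyArray tests collapse into a single dtype loop above, which removes the copy-paste but also means a failure no longer names the offending dtype in the test id. A hypothetical variant (not part of this PR) that keeps per-dtype reporting with unittest's subTest:

    def test_assign_NumpyArray(self):
        for dtype in [np.bool_, np.float32, np.int32, np.int64]:
            # subTest tags each iteration, so a failure reports the dtype.
            with self.subTest(dtype=dtype), fluid.dygraph.guard():
                array = np.random.random(size=(100, 10)).astype(dtype)
                result1 = paddle.zeros(shape=[3, 3], dtype='float32')
                paddle.assign(array, result1)
                np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)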
@@ -232,6 +242,31 @@ class TestAssignOApi(unittest.TestCase):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
+)
+class TestAssignOApiFP16(unittest.TestCase):
+    def test_assign_fp16(self):
+        x = np.random.uniform(0, 10, [3, 3]).astype(np.float16)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='float16')
+        paddle.assign(x, result)
+        np.testing.assert_equal(result.numpy(), x.numpy())
+
+    def test_assign_bfp16(self):
+        x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
+        x = convert_float_to_uint16(x_f)
+        x = paddle.to_tensor(x)
+        result = paddle.zeros(shape=[3, 3], dtype='bfloat16')
+        paddle.assign(x, result)
+        np.testing.assert_allclose(
+            convert_uint16_to_float(result.numpy()), x_f, rtol=1e-02
+        )
+        np.testing.assert_equal(
+            convert_uint16_to_float(result.numpy()), convert_uint16_to_float(x)
+        )
+
+
 class TestAssignOpErrorApi(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
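On the tolerances above: test_assign_fp16 can demand exact equality because assign is a plain copy and both sides stay in float16, while test_assign_bfp16 compares the copied result against the original float32 x_f, which already lost low mantissa bits in convert_float_to_uint16; with roughly 8 significant bits, that quantization error is at most about 2 ** -8 ≈ 0.004, so rtol=1e-02 is a safe bound. For reference, a minimal dygraph sketch of the two calling forms of paddle.assign exercised here, returning a fresh copy versus filling a preallocated output:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    y = paddle.assign(x)                          # no output: returns a new tensor
    out = paddle.zeros([2, 3], dtype='float32')
    paddle.assign(x, out)                         # output given: copies x into out
    np.testing.assert_equal(y.numpy(), x.numpy())
    np.testing.assert_equal(out.numpy(), x.numpy())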
@@ -284,6 +319,7 @@ class TestAssignDoubleGradCheck(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
 class TestAssignTripleGradCheck(unittest.TestCase):
 
@@ -315,6 +351,7 @@ class TestAssignTripleGradCheck(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+        paddle.disable_static()
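The trailing paddle.disable_static() added to both gradient-check tests looks cosmetic but matters for test isolation: these double/triple gradient checks run in static mode, and without restoring dynamic mode afterwards, any dygraph test that happens to run later in the same process would inherit static mode. It mirrors the enable/disable pairing used in the new FP16/BF16 OpTests above.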
 if __name__ == '__main__':
...