Unverified commit 12218931, authored by westfisha, committed by GitHub

[AMP OP&Test] Support bf16/fp16 for dropout and full_like op (#51809)

* update full_like and dropout

* update

* update

* update

* remove check_grad
Parent c4abe26c
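For context (not part of this commit): the bf16 tests below feed inputs and expected outputs as uint16 arrays produced by convert_float_to_uint16 from eager_op_test. A minimal numpy sketch of that packing, assuming a truncating variant (the real helper may round to nearest instead), shows the precision loss the converted expected outputs absorb:

import numpy as np


def pack_bf16(x):
    # Keep only the upper 16 bits of the float32 representation
    # (truncating sketch; Paddle's helper may round instead).
    return (np.asarray(x, np.float32).view(np.uint32) >> 16).astype(np.uint16)


def unpack_bf16(u):
    # Shift the stored 16 bits back into the high half of a float32.
    return (np.asarray(u, np.uint16).astype(np.uint32) << 16).view(np.float32)


vals = np.array([6666.0, 0.1, 3.14159], dtype=np.float32)
packed = pack_bf16(vals)          # dtype uint16, like the bf16 test inputs
roundtrip = unpack_bf16(packed)   # 6666.0 -> 6656.0, 0.1 -> ~0.0996
print(packed.dtype, roundtrip)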
@@ -324,6 +324,9 @@ class TestFP16DropoutOp(OpTest):
             core.CUDAPlace(0), atol=1e-3, check_prim=True
         )
 
+    def test_check_grad_normal(self):
+        self.check_grad(['X'], 'Out')
+
 
 @unittest.skipIf(
     not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
 import paddle.framework.dtype as dtypes
@@ -115,11 +115,19 @@ class TestFullLikeOp1(OpTest):
         self.init_data()
         self.if_enable_cinn()
 
-        x = np.zeros(self.shape)
+        bf16_flag = self.dtype == np.uint16
+        x = np.zeros(self.shape).astype(np.float32 if bf16_flag else self.dtype)
         x = OpTest.np_dtype_to_fluid_dtype(x)
         out = np.full_like(x, self.fill_value, self.dtype)
+        if bf16_flag:
+            x = convert_float_to_uint16(x)
+            out = convert_float_to_uint16(out)
+
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
         self.attrs = {
             'value': self.fill_value,
             'dtype': convert_np_dtype_to_dtype_(self.dtype),
@@ -140,7 +148,7 @@ class TestFullLikeOp1(OpTest):
 class TestFullLikeOp2(TestFullLikeOp1):
     def init_data(self):
         self.fill_value = 1000
-        self.shape = [1024, 1024]
+        self.shape = [10, 10]
         self.dtype = np.float64
 
     def if_enable_cinn(self):
@@ -150,7 +158,7 @@ class TestFullLikeOp2(TestFullLikeOp1):
 class TestFullLikeOp3(TestFullLikeOp1):
     def init_data(self):
         self.fill_value = 8888
-        self.shape = [5000, 5000]
+        self.shape = [10, 10]
         self.dtype = np.int64
 
     def if_enable_cinn(self):
@@ -173,5 +181,24 @@ class TestFullLikeOp4(unittest.TestCase):
         paddle.enable_static()
 
 
+class TestFullLikeFP16Op(TestFullLikeOp1):
+    def init_data(self):
+        self.fill_value = 6666
+        self.shape = [10, 10]
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not complied with CUDA and not support the bfloat16",
+)
+class TestFullLikeBF16Op(TestFullLikeOp1):
+    def init_data(self):
+        self.fill_value = 6666
+        self.shape = [10, 10]
+        self.dtype = np.uint16
+
+
 if __name__ == "__main__":
     unittest.main()
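Not part of the diff above, just a hedged usage sketch: on a CUDA build where the bf16 full_like kernel exercised by these tests is available, the eager-mode behavior looks roughly like this (the dtype handling here is an assumption for illustration, not something the commit shows):

import paddle

# Assumption: a recent CUDA build with bf16 support for full_like.
if paddle.is_compiled_with_cuda():
    x = paddle.zeros([10, 10], dtype=paddle.bfloat16)
    y = paddle.full_like(x, 6666)  # full_like keeps x's dtype
    print(y.dtype)  # expected: paddle.bfloat16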