Unverified · Commit c2b24166 authored by Siming Dai and committed by GitHub

【AMP OP&Test】Add fp16 test for divide, matmul, pnorm (#51005)

* add fp16 test for divide, matmul, pnorm

* add cumsum fp16 unittest

* fix threshold

* revert cumsum

* fix code-style

* fix according to review

* fix kernel not found
Parent b3caa233
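
The change common to all three test files is a small factory that stamps out an FP16 variant of each existing parent test and registers it in the module namespace, instead of keeping a single hand-written FP16 class per op. A minimal standalone sketch of that pattern (plain unittest and NumPy only, with a hypothetical BaseAddTest parent; the real parents in this PR are Paddle OpTest subclasses):

import unittest

import numpy as np


class BaseAddTest(unittest.TestCase):
    # Hypothetical parent; stands in for an existing OpTest-based test.
    def init_dtype(self):
        self.dtype = np.float32

    def test_add(self):
        self.init_dtype()
        x = np.ones(4, dtype=self.dtype)
        np.testing.assert_allclose(x + x, np.full(4, 2, dtype=self.dtype))


def create_test_fp16_class(parent):
    # Subclass the parent, override only the dtype hook, then register the
    # generated class under a derived name so unittest discovery finds it.
    class TestFp16Op(parent):
        def init_dtype(self):
            self.dtype = np.float16

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestFp16Op.__name__ = cls_name
    globals()[cls_name] = TestFp16Op


create_test_fp16_class(BaseAddTest)

if __name__ == "__main__":
    unittest.main()
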
@@ -417,16 +417,68 @@ class TestElementwiseDivOpInt(ElementwiseDivOp):
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16

    def if_enable_cinn(self):
        self.enable_cinn = True


def create_test_fp16_class(parent, max_relative_error=2e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestElementwiseDivFP16Op(parent):
        def init_dtype(self):
            self.dtype = np.float16
            self.val_dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = True

        def test_check_gradient(self):
            check_list = []
            check_list.append(
                {
                    'grad': ['X', 'Y'],
                    'no_grad': None,
                    'val_grad': [self.grad_x, self.grad_y],
                }
            )
            check_list.append(
                {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
            )
            check_list.append(
                {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
            )
            for check_option in check_list:
                check_args = [check_option['grad'], 'Out']
                check_kwargs = {
                    'no_grad_set': check_option['no_grad'],
                    'user_defined_grads': check_option['val_grad'],
                    'user_defined_grad_outputs': [self.grad_out],
                    'check_dygraph': self.check_dygraph,
                    'max_relative_error': max_relative_error,
                }
                if self.place is None:
                    self.check_grad(*check_args, **check_kwargs)
                else:
                    check_args.insert(0, self.place)
                    self.check_grad_with_place(*check_args, **check_kwargs)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestElementwiseDivFP16Op.__name__ = cls_name
    globals()[cls_name] = TestElementwiseDivFP16Op


create_test_fp16_class(ElementwiseDivOp)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim1)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim2)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim3)
create_test_fp16_class(TestElementwiseDivOpScalar)
create_test_fp16_class(TestElementwiseDivOpVector)
create_test_fp16_class(TestElementwiseDivOpBroadcast0)
create_test_fp16_class(TestElementwiseDivOpBroadcast1)
create_test_fp16_class(TestElementwiseDivOpBroadcast2)
create_test_fp16_class(TestElementwiseDivOpBroadcast3)
create_test_fp16_class(TestElementwiseDivOpBroadcast4)
create_test_fp16_class(TestElementwiseDivOpBroadcast5)
create_test_fp16_class(TestElementwiseDivOpCommonuse1)
create_test_fp16_class(TestElementwiseDivOpCommonuse2)
create_test_fp16_class(TestElementwiseDivOpXsizeLessThanYsize)


class TestElementwiseDivBroadcast(unittest.TestCase):
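For the FP16 gradient checks above, user_defined_grads is filled from self.grad_x / self.grad_y, which the parent divide tests set up as reference gradients. For plain elementwise z = x / y they follow from dz/dx = 1/y and dz/dy = -x/y**2; a small NumPy sketch of such reference gradients (broadcast-free case assumed, accumulation in float32 even when the op runs in float16):

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0.1, 1.0, [13, 17]).astype(np.float16)
y = rng.uniform(0.1, 1.0, [13, 17]).astype(np.float16)
grad_out = np.ones([13, 17], dtype=np.float16)

# grad wrt x: dOut/dx = 1 / y
grad_x = (grad_out.astype(np.float32) / y.astype(np.float32)).astype(np.float16)
# grad wrt y: dOut/dy = -x / y**2
grad_y = (
    -grad_out.astype(np.float32)
    * x.astype(np.float32)
    / (y.astype(np.float32) ** 2)
).astype(np.float16)

print(grad_x.dtype, grad_y.dtype)  # float16 float16
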
@@ -377,6 +377,8 @@ create_test_fp16_class(TestMatMulOp14)
create_test_fp16_class(TestMatMulOp15)
create_test_fp16_class(TestMatMulOp16)
create_test_fp16_class(TestMatMulOp17)
create_test_fp16_class(TestMatMulOpBroadcast1)
create_test_fp16_class(TestMatMulOpBroadcast2)
# --------------------test matmul bf16--------------------
@@ -90,6 +90,7 @@ class TestFrobeniusNormOp(OpTest):
        self.python_api = frobenius_norm
        self.op_type = "frobenius_norm"
        self.init_test_case()
        self.init_dtype()
        x = (np.random.random(self.shape) + 1.0).astype(self.dtype)
        norm = numpy_frobenius_norm(x, self.axis, self.keepdim)
        self.reduce_all = len(self.axis) == len(self.shape)
@@ -111,6 +112,8 @@ class TestFrobeniusNormOp(OpTest):
        self.shape = [2, 3, 4, 5]
        self.axis = (1, 2)
        self.keepdim = False

    def init_dtype(self):
        self.dtype = "float64"
@@ -119,6 +122,8 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
        self.shape = [5, 5, 5]
        self.axis = (0, 1)
        self.keepdim = True

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
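The recurring edit in the norm tests is the same refactor: the dtype moves out of init_test_case() into a separate init_dtype() hook that setUp() calls before building the inputs, so a generated FP16 subclass only has to override that one hook. A standalone sketch of the hook ordering (plain unittest, hypothetical TestNormLike class, not the Paddle OpTest machinery):

import unittest

import numpy as np


class TestNormLike(unittest.TestCase):
    # setUp builds the input only after init_test_case() and init_dtype()
    # have run, so subclasses can swap the dtype without repeating the
    # shape/axis setup.
    def setUp(self):
        self.init_test_case()
        self.init_dtype()
        self.x = (np.random.random(self.shape) + 1.0).astype(self.dtype)

    def init_test_case(self):
        self.shape = [2, 3, 4, 5]

    def init_dtype(self):
        self.dtype = "float64"

    def test_dtype(self):
        self.assertEqual(self.x.dtype, np.dtype(self.dtype))


class TestNormLikeFP32(TestNormLike):
    def init_dtype(self):
        self.dtype = "float32"


if __name__ == "__main__":
    unittest.main()
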
@@ -130,6 +135,7 @@ class TestPnormOp(OpTest):
        self.op_type = "p_norm"
        self.python_api = p_norm_python_api
        self.init_test_case()
        self.init_dtype()
        x = (np.random.random(self.shape) + 0.5).astype(self.dtype)
        norm = p_norm(x, self.axis, self.porder, self.keepdim, self.asvector)
        self.inputs = {'X': x}
@@ -155,9 +161,11 @@ class TestPnormOp(OpTest):
        self.epsilon = 1e-12
        self.porder = 2.0
        self.keepdim = False
        self.dtype = "float64"
        self.asvector = False

    def init_dtype(self):
        self.dtype = "float64"

    def calc_gradient(self):
        self.attrs = {
            'epsilon': self.epsilon,
@@ -206,9 +214,11 @@ class TestPnormOp2(TestPnormOp):
        self.epsilon = 1e-12
        self.porder = 2.0
        self.keepdim = True
        self.dtype = "float32"
        self.asvector = False

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
@@ -220,9 +230,11 @@ class TestPnormOp3(TestPnormOp):
        self.epsilon = 1e-12
        self.porder = np.inf
        self.keepdim = True
        self.dtype = "float32"
        self.asvector = False

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
@@ -234,9 +246,11 @@ class TestPnormOp4(TestPnormOp):
        self.epsilon = 1e-12
        self.porder = -np.inf
        self.keepdim = True
        self.dtype = "float32"
        self.asvector = False

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
@@ -248,9 +262,11 @@ class TestPnormOp5(TestPnormOp):
        self.epsilon = 1e-12
        self.porder = 0
        self.keepdim = True
        self.dtype = "float32"
        self.asvector = False

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
@@ -262,51 +278,50 @@ class TestPnormOp6(TestPnormOp):
        self.epsilon = 1e-12
        self.porder = 2
        self.keepdim = False
        self.dtype = "float32"
        self.asvector = True

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestPnormOpFP16(TestPnormOp):
    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = 1
        self.epsilon = 1e-12
        self.porder = 2.0
        self.keepdim = False
        self.dtype = "float16"
        self.asvector = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-3)

    def init_dtype(self):
        self.dtype = "float32"

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X'], 'Out', user_defined_grads=self.gradient
            )
        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestPnormOpFP161(TestPnormOpFP16):
    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = -1
        self.epsilon = 1e-12
        self.porder = 2.0
        self.keepdim = False
        self.dtype = "float16"
        self.asvector = True


def create_test_fp16_class(parent, max_relative_error=2e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestPnormFP16Op(parent):
        def init_dtype(self):
            self.dtype = "float16"

        def test_check_output(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    user_defined_grads=self.gradient,
                    max_relative_error=max_relative_error,
                )

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestPnormFP16Op.__name__ = cls_name
    globals()[cls_name] = TestPnormFP16Op


create_test_fp16_class(TestPnormOp)
create_test_fp16_class(TestPnormOp2)
create_test_fp16_class(TestPnormOp3)
create_test_fp16_class(TestPnormOp4)
create_test_fp16_class(TestPnormOp5)
create_test_fp16_class(TestPnormOp6)


@unittest.skipIf(
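
The generated FP16 p_norm tests run on a CUDA place and compare against the higher-precision reference within max_relative_error=2e-3, since float16 carries only about three decimal digits of precision. A minimal NumPy sketch (not the OpTest machinery) of why that budget is enough for the 2-norm along an axis:

import numpy as np

rng = np.random.default_rng(0)
x = (rng.random([2, 3, 4, 5]) + 0.5).astype(np.float16)

# Reference in float64 vs. a result computed in float32 and rounded to
# float16 to mimic an fp16 output; float16 rounding alone contributes a
# relative error of at most ~2**-11 (about 5e-4), well under 2e-3.
ref = np.linalg.norm(x.astype(np.float64), ord=2, axis=1)
low = np.linalg.norm(x.astype(np.float32), axis=1).astype(np.float16)

rel_err = np.abs(low.astype(np.float64) - ref) / np.maximum(np.abs(ref), 1e-12)
assert rel_err.max() < 2e-3, rel_err.max()
print("max relative error:", rel_err.max())
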
@@ -352,9 +367,11 @@ class TestPnormBF16Op(OpTest):
        self.epsilon = 1e-12
        self.porder = 2.0
        self.keepdim = False
        self.dtype = np.uint16
        self.asvector = False

    def init_dtype(self):
        self.dtype = np.uint16

    def calc_gradient(self):
        self.attrs = {
            'epsilon': self.epsilon,