From b0ebd3447077b4603b530cbacd11fad963852e34 Mon Sep 17 00:00:00 2001
From: mhy-666 <57670156+mhy-666@users.noreply.github.com>
Date: Tue, 11 Apr 2023 10:42:13 +0800
Subject: [PATCH] [AMP OP&Test] add fp16/bf16 unittest for
 softmax_with_cross_entropy ops (#52412)

* add softmax_with_cross_entropy bf16 test

* correct default value in test BF16/FP16 op

* fix test check_output/grad, add skipIf
---
 .../test_softmax_with_cross_entropy_op.py     | 65 +++++++++++++++++--
 1 file changed, 61 insertions(+), 4 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index ca11dd0a2bd..51aa2330648 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -15,7 +15,7 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 from test_softmax_op import stable_softmax
 
 import paddle
@@ -478,6 +478,7 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
     def setUp(self):
         self.initParams()
         self.op_type = "softmax_with_cross_entropy"
+        self.dtype = np.float16
 
         # NOTE: numpy float16 have very low accuracy, use float32 for numpy check.
         date_type = np.float32 if core.is_compiled_with_rocm() else np.float64
@@ -508,12 +509,12 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output(atol=1e-2)
-        self.check_output(atol=1e-2)
+            self.check_output()
+        self.check_output()
 
     def test_check_grad(self):
         if self.python_api is not None:
-            self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+            self.check_grad(["Logits"], "Loss")
         self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
 
 
@@ -917,6 +918,62 @@ class TestSoftmaxWithCrossEntropyOpBoundary1(TestSoftmaxWithCrossEntropyOp):
         self.use_softmax = True
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA and not support the bfloat16",
+)
+class TestSoftmaxWithCrossEntropyOpBF16(TestSoftmaxWithCrossEntropyOp):
+    def setUp(self):
+        self.initParams()
+        self.op_type = "softmax_with_cross_entropy"
+        self.dtype = np.uint16
+
+        # NOTE: numpy bf16 have very low accuracy, use float32 for numpy check.
+        date_type = np.float32
+        logits = getattr(
+            self,
+            "logits",
+            np.random.uniform(0.1, 1.0, self.shape).astype(date_type),
+        )
+        softmax = np.apply_along_axis(stable_softmax, self.axis, logits)
+
+        axis_dim = self.shape[self.axis]
+        self.shape[self.axis] = 1
+        labels = np.random.randint(0, axis_dim, self.shape, dtype="int64")
+
+        loss = cross_entropy(softmax, labels, self.soft_label, self.axis)
+
+        self.inputs = {
+            "Logits": convert_float_to_uint16(logits),
+            "Label": labels,
+        }
+        self.outputs = {
+            "Softmax": convert_float_to_uint16(softmax),
+            "Loss": convert_float_to_uint16(loss),
+        }
+        self.attrs = {
+            "numeric_stable_mode": self.numeric_stable_mode,
+            "soft_label": self.soft_label,
+        }
+        if self.axis != -1:
+            self.attrs['axis'] = self.axis
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        if self.python_api is not None:
+            self.check_output_with_place(place)
+        self.check_output_with_place(place, atol=1e-2)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        if self.python_api is not None:
+            self.check_grad_with_place(place, ["Logits"], "Loss")
+        self.check_grad_with_place(
+            place, ["Logits"], "Loss", max_relative_error=0.1
+        )
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
--
GitLab
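
Note on the bf16 representation used in the new test: the OpTest framework stores bfloat16 tensors as np.uint16 arrays holding the raw bf16 bit pattern, which is why the BF16 case sets self.dtype = np.uint16 and routes its float32 numpy reference data through convert_float_to_uint16. Below is a minimal sketch of that kind of conversion, assuming plain truncation of the float32 mantissa; the function name float32_to_bf16_bits is illustrative only, and the real helper in eager_op_test may round or handle layouts differently.

    import numpy as np

    def float32_to_bf16_bits(x):
        # Illustrative sketch, not Paddle's API: reinterpret each float32 as
        # uint32 and keep the upper 16 bits, which is the bfloat16 bit pattern
        # (sign, 8-bit exponent, 7-bit mantissa). Truncation only, no rounding.
        x = np.ascontiguousarray(x, dtype=np.float32)
        return (x.view(np.uint32) >> 16).astype(np.uint16)

    logits = np.random.uniform(0.1, 1.0, (4, 5)).astype(np.float32)
    bf16_bits = float32_to_bf16_bits(logits)  # uint16 array, same shape as logits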