Unverified commit 69d3f4e3, authored by Danyang Zhang, committed by GitHub

delete bf16 of cross entropy (#53922)

* delete bf16 of cross entropy

* delete bf16 of cross entropy
Parent eb193d8d
@@ -297,8 +297,7 @@ PD_REGISTER_KERNEL(cross_entropy_with_softmax_grad,
                    phi::CrossEntropyWithSoftmaxGradKernel,
                    float,
                    double,
-                   phi::dtype::float16,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::float16) {}
 #else
 PD_REGISTER_KERNEL(cross_entropy_with_softmax_grad,
                    GPU,
...
@@ -1479,8 +1479,7 @@ PD_REGISTER_KERNEL(cross_entropy_with_softmax,
                    phi::CrossEntropyWithSoftmaxKernel,
                    float,
                    double,
-                   phi::dtype::float16,
-                   phi::dtype::bfloat16) {}
+                   phi::dtype::float16) {}
 #else
 PD_REGISTER_KERNEL(cross_entropy_with_softmax,
                    GPU,
...
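Note: these registration lists are what back the Python-level op. A minimal sketch of exercising the fused softmax + cross-entropy entry point through the public API, assuming a CUDA build of PaddlePaddle (float32 remains registered after this change; bfloat16 no longer is — what happens for an unregistered dtype is not shown in this diff):

import paddle
import paddle.nn.functional as F

paddle.set_device('gpu')                      # assumes a CUDA build
logits = paddle.randn([4, 10], dtype='float32')
label = paddle.randint(0, 10, shape=[4, 1])   # hard labels, int64
loss = F.softmax_with_cross_entropy(logits, label)
print(loss.shape)  # [4, 1]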
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
+from eager_op_test import OpTest, paddle_static_guard
 from test_softmax_op import stable_softmax
 import paddle
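Note: the dropped import, convert_float_to_uint16, packs float32 data into the uint16 bit pattern Paddle's tests use to represent bfloat16. A minimal numpy sketch of that conversion (an assumption about the helper's behavior — truncation rather than rounding — not the eager_op_test source):

import numpy as np

def convert_float_to_uint16_sketch(x):
    # bfloat16 keeps the top 16 bits of an IEEE-754 float32, so
    # reinterpret as uint32 and drop the low 16 bits.
    return np.right_shift(x.astype(np.float32).view(np.uint32), 16).astype(np.uint16)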
@@ -918,62 +918,6 @@ class TestSoftmaxWithCrossEntropyOpBoundary1(TestSoftmaxWithCrossEntropyOp):
         self.use_softmax = True
 
-
-@unittest.skipIf(
-    not core.is_compiled_with_cuda()
-    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
-    "core is not compiled with CUDA and not support the bfloat16",
-)
-class TestSoftmaxWithCrossEntropyOpBF16(TestSoftmaxWithCrossEntropyOp):
-    def setUp(self):
-        self.initParams()
-        self.op_type = "softmax_with_cross_entropy"
-        self.dtype = np.uint16
-
-        # NOTE: numpy bf16 have very low accuracy, use float32 for numpy check.
-        date_type = np.float32
-        logits = getattr(
-            self,
-            "logits",
-            np.random.uniform(0.1, 1.0, self.shape).astype(date_type),
-        )
-        softmax = np.apply_along_axis(stable_softmax, self.axis, logits)
-
-        axis_dim = self.shape[self.axis]
-        self.shape[self.axis] = 1
-        labels = np.random.randint(0, axis_dim, self.shape, dtype="int64")
-
-        loss = cross_entropy(softmax, labels, self.soft_label, self.axis)
-
-        self.inputs = {
-            "Logits": convert_float_to_uint16(logits),
-            "Label": labels,
-        }
-        self.outputs = {
-            "Softmax": convert_float_to_uint16(softmax),
-            "Loss": convert_float_to_uint16(loss),
-        }
-        self.attrs = {
-            "numeric_stable_mode": self.numeric_stable_mode,
-            "soft_label": self.soft_label,
-        }
-        if self.axis != -1:
-            self.attrs['axis'] = self.axis
-
-    def test_check_output(self):
-        place = core.CUDAPlace(0)
-        if self.python_api is not None:
-            self.check_output_with_place(place)
-        self.check_output_with_place(place, atol=1e-2)
-
-    def test_check_grad(self):
-        place = core.CUDAPlace(0)
-        if self.python_api is not None:
-            self.check_grad_with_place(place, ["Logits"], "Loss")
-        self.check_grad_with_place(
-            place, ["Logits"], "Loss", max_relative_error=0.1
-        )
-
-
 class TestSoftmaxWithCrossEntropyOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
...
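Note: the deleted test checked the kernel against a numpy reference — a numerically stable softmax followed by the negative log-likelihood of the labeled class. A self-contained sketch of that reference for 2-D logits with hard labels (the helper names here are hypothetical; the real test imports stable_softmax and cross_entropy from its own utilities):

import numpy as np

def stable_softmax_ref(row):
    # Shift by the row max before exponentiating to avoid overflow.
    exps = np.exp(row - np.max(row))
    return exps / np.sum(exps)

def cross_entropy_ref(softmax, labels):
    # Hard-label loss: -log(probability of the true class), per sample.
    n = softmax.shape[0]
    picked = softmax[np.arange(n), labels.reshape(-1)]
    return -np.log(picked).reshape(-1, 1)

logits = np.random.uniform(0.1, 1.0, (4, 10)).astype(np.float32)
softmax = np.apply_along_axis(stable_softmax_ref, 1, logits)
labels = np.random.randint(0, 10, (4, 1), dtype="int64")
loss = cross_entropy_ref(softmax, labels)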