未验证 提交 dee7d78d 编写于 作者: Y YuhangLi 提交者: GitHub

[AMP OP&Test]stack & unstack ops fp16 bf16 support (#50999)

* stack fp16 & bf16 support

* unstack fp16 support

* unstack bf16 support

* append stack fp16 ut

* add unstack

* recover unstack cpu kernel

* fix some issues for unstack ut

* delete unused var

* add check_place

* fix inference err
上级 aaf873b2
......@@ -105,6 +105,47 @@ class TestStackOp_ZeroDim(TestStackOpBase):
self.enable_cinn = False
class TestStackFP16Op(TestStackOpBase):
    """Stack op test with float16 inputs, using the base defaults."""

    def initParameters(self):
        # Only override the dtype; all other parameters come from the base.
        self.dtype = np.float16
class TestStackFP16Op1(TestStackOpBase):
    """Stack op float16 test with a larger number of inputs (8)."""

    def initParameters(self):
        self.dtype = np.float16
        self.num_inputs = 8
class TestStackFP16Op2(TestStackOpBase):
    """Stack op float16 test with an even larger number of inputs (10)."""

    def initParameters(self):
        self.dtype = np.float16
        self.num_inputs = 10
class TestStackFP16Op3(TestStackOpBase):
    """Stack op float16 test stacking along the last axis (axis=-1)."""

    def initParameters(self):
        self.dtype = np.float16
        self.axis = -1
class TestStackFP16Op4(TestStackOpBase):
    """Stack op float16 test with a negative axis (axis=-4)."""

    def initParameters(self):
        self.dtype = np.float16
        self.axis = -4
class TestStackFP16Op5(TestStackOpBase):
    """Stack op float16 test stacking along axis=1."""

    def initParameters(self):
        self.dtype = np.float16
        self.axis = 1
class TestStackFP16Op6(TestStackOpBase):
    """Stack op float16 test stacking along axis=3."""

    def initParameters(self):
        self.dtype = np.float16
        self.axis = 3
class TestStackBF16Op(OpTest):
def initDefaultParameters(self):
self.num_inputs = 4
......
......@@ -15,9 +15,11 @@
import unittest
import numpy as np
from eager_op_test import OpTest
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core
class TestUnStackOpBase(OpTest):
......@@ -64,6 +66,35 @@ class TestUnStackOpBase(OpTest):
self.check_grad(['X'], self.get_y_names())
class TestUnStackFP16Op(TestUnStackOpBase):
    """Unstack op test with float16 inputs, using the base defaults."""

    def initParameters(self):
        # Only override the dtype; all other parameters come from the base.
        self.dtype = np.float16
class TestStackFP16Op3(TestUnStackOpBase):
    """Unstack op float16 test along the last axis (axis=-1).

    NOTE(review): the class name says "Stack" but this inherits
    TestUnStackOpBase and tests unstack; consider renaming to
    TestUnStackFP16Op3 to avoid confusion with the stack-op tests.
    """

    def initParameters(self):
        self.dtype = np.float16
        self.axis = -1
class TestStackFP16Op4(TestUnStackOpBase):
    """Unstack op float16 test with a negative axis (axis=-3).

    NOTE(review): misnamed — this tests unstack (see base class);
    TestUnStackFP16Op4 would be clearer.
    """

    def initParameters(self):
        self.dtype = np.float16
        self.axis = -3
class TestStackFP16Op5(TestUnStackOpBase):
    """Unstack op float16 test along axis=1.

    NOTE(review): misnamed — this tests unstack (see base class);
    TestUnStackFP16Op5 would be clearer.
    """

    def initParameters(self):
        self.dtype = np.float16
        self.axis = 1
class TestStackFP16Op6(TestUnStackOpBase):
    """Unstack op float16 test along axis=2.

    NOTE(review): misnamed — this tests unstack (see base class);
    TestUnStackFP16Op6 would be clearer.
    """

    def initParameters(self):
        self.dtype = np.float16
        self.axis = 2
class TestStackOp3(TestUnStackOpBase):
    """Unstack op test along the last axis (axis=-1).

    NOTE(review): misnamed — this tests unstack (see base class);
    TestUnStackOp3 would be clearer.
    """

    def initParameters(self):
        self.axis = -1
......@@ -84,6 +115,71 @@ class TestStackOp6(TestUnStackOpBase):
self.axis = 2
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA and do not support bfloat16",
)
class TestUnStackBF16Op(OpTest):
    """Unstack op test with bfloat16 inputs (stored as uint16) on CUDA."""

    def initDefaultParameters(self):
        # Default configuration; initParameters() may override any of these.
        self.input_dim = (5, 6, 7)
        self.axis = 0
        # bfloat16 tensors are represented as uint16 in OpTest fixtures.
        self.dtype = np.uint16

    def initParameters(self):
        # Hook for subclasses to tweak the defaults; intentionally empty.
        pass

    def get_y_names(self):
        """Return the output names y0..y{n-1} along the unstack axis."""
        return [f'y{i}' for i in range(self.input_dim[self.axis])]

    def setUp(self):
        self.initDefaultParameters()
        self.initParameters()
        self.op_type = 'unstack'
        self.python_api = paddle.unstack
        # Build a float32 reference input, split it along `axis`, then pack
        # both the input and the expected outputs into bfloat16 (uint16).
        # Using a separate local avoids reusing self.x for two dtypes.
        x_fp32 = np.random.random(size=self.input_dim).astype(np.float32)
        outs = np.split(x_fp32, self.input_dim[self.axis], self.axis)
        new_shape = list(self.input_dim)
        del new_shape[self.axis]
        y_names = self.get_y_names()
        # Each output slice keeps its name and drops the unstacked axis.
        outputs = [
            (name, np.reshape(convert_float_to_uint16(out), new_shape))
            for name, out in zip(y_names, outs)
        ]
        self.x = convert_float_to_uint16(x_fp32)
        # The python API output signature is exactly the output names,
        # so reuse y_names instead of rebuilding a duplicate list.
        self.python_out_sig = y_names
        self.inputs = {'X': self.x}
        self.outputs = {'Y': outputs}
        self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]}

    def test_check_output(self):
        # bfloat16 is only checked on a CUDA place (see skipIf above).
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        # Verify the gradient through the dygraph autograd engine instead of
        # OpTest's numeric gradient check.
        with fluid.dygraph.guard():
            x = paddle.to_tensor(self.inputs['X'])
            x.stop_gradient = False
            y = paddle.unstack(
                x, axis=self.attrs['axis'], num=self.attrs['num']
            )
            dx = paddle.grad(y, x)[0].numpy()
            # Every input element appears in exactly one output slice, so
            # with default (all-ones) output grads, dx is all ones.
            dx_expected = convert_float_to_uint16(
                np.ones(self.input_dim, np.float32)
            )
            np.testing.assert_array_equal(dx, dx_expected)
class TestUnstackZeroInputOp(unittest.TestCase):
def unstack_zero_input_static(self):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册