Unverified commit f5332cad, authored by jiangcheng, committed by GitHub

[CINN] Reopen some prim+cinn unittest (#52595)

* [CINN] Reopen some prim+cinn unittest

* revert scatter for ci

* change cinn test timeout from 120 to 150
Parent ab754417
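The recurring pattern in this diff: rather than hard-coding `self.enable_cinn = False` in `setUp`, tests call an overridable `if_enable_cinn()` hook, so the CINN path stays on by default and only the cases CINN cannot yet handle (0-D outputs, bf16) opt out. A minimal sketch of that hook pattern, using hypothetical class names rather than Paddle's real OpTest:

```python
# Minimal sketch of the hook pattern this commit applies; BaseCase and
# ZeroDimCase are hypothetical stand-ins, not Paddle's actual test classes.
import numpy as np


class BaseCase:
    def setUp(self):
        self.enable_cinn = True   # CINN-compiled path on by default
        self.if_enable_cinn()     # subclasses may flip the flag here
        self.x = np.random.random((4, 5)).astype("float64")

    def if_enable_cinn(self):
        pass                      # base case keeps CINN enabled


class ZeroDimCase(BaseCase):
    def if_enable_cinn(self):
        # 0-D outputs are not supported by CINN, so this case opts out
        self.enable_cinn = False
```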
@@ -1136,6 +1136,7 @@ set(TEST_CINN_OPS
foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
if(WITH_CINN)
set_tests_properties(${TEST_CINN_OPS} PROPERTIES LABELS "RUN_TYPE=CINN")
set_tests_properties(${TEST_CINN_OPS} PROPERTIES TIMEOUT 150)
endif()
endforeach()
@@ -32,7 +32,6 @@ class TestAssignOp(eager_op_test.OpTest):
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
self.enable_cinn = False
x = np.random.random(size=(100, 10)).astype('float64')
self.inputs = {'X': x}
self.outputs = {'Out': x}
@@ -57,7 +56,6 @@ class TestAssignFP16Op(eager_op_test.OpTest):
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
self.enable_cinn = False
x = np.random.random(size=(100, 10)).astype('float16')
self.inputs = {'X': x}
self.outputs = {'Out': x}
@@ -30,9 +30,9 @@ class TestConcatOp(OpTest):
self.python_api = paddle.concat
self.public_python_api = paddle.concat
self.prim_op_type = "prim"
self.enable_cinn = False
self.dtype = self.get_dtype()
self.init_test_data()
self.if_enable_cinn()
self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
self.attrs = {'axis': self.axis}
if self.axis < 0:
@@ -82,6 +82,9 @@ class TestConcatOp(OpTest):
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = 1
def if_enable_cinn(self):
pass
class TestConcatOp2(TestConcatOp):
def init_test_data(self):
@@ -291,6 +294,9 @@ def create_test_bf16(parent):
def get_dtype(self):
return np.uint16
def if_enable_cinn(self):
self.enable_cinn = False
cls_name = "{}_{}".format(parent.__name__, "Bf16")
TestConcatBf16.__name__ = cls_name
globals()[cls_name] = TestConcatBf16
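For readers unfamiliar with the `create_test_bf16` idiom above: it subclasses the given parent, renames the new class, and registers it in `globals()` so the unittest loader discovers the bf16 variant by name at import time. A self-contained sketch of that registration trick, with hypothetical names (`DummyConcatCase`, `create_bf16_variant`):

```python
# Sketch of the dynamic test-class registration used by create_test_bf16;
# DummyConcatCase and create_bf16_variant are illustrative, not Paddle code.
import unittest

import numpy as np


class DummyConcatCase(unittest.TestCase):
    def get_dtype(self):
        return np.float32

    def if_enable_cinn(self):
        pass

    def test_dtype(self):
        self.assertIn(self.get_dtype(), (np.float32, np.uint16))


def create_bf16_variant(parent):
    class Bf16Case(parent):
        def get_dtype(self):
            return np.uint16          # bf16 data is carried as uint16

        def if_enable_cinn(self):
            self.enable_cinn = False  # bf16 variant keeps CINN off

    cls_name = "{}_{}".format(parent.__name__, "Bf16")
    Bf16Case.__name__ = cls_name
    globals()[cls_name] = Bf16Case    # discoverable by the unittest loader


create_bf16_variant(DummyConcatCase)

if __name__ == "__main__":
    unittest.main()  # runs DummyConcatCase and DummyConcatCase_Bf16
```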
@@ -314,7 +314,7 @@ class TestSumOpExclusiveFP16(OpTest):
self.python_api = cumsum_wrapper
self.public_python_api = paddle.cumsum
self.init_dtype()
self.enable_cinn = False
self.enable_cinn = True
self.attrs = {'axis': 2, "exclusive": True}
self.x = np.random.random((4, 5, 20)).astype(self.dtype)
self.out = np.concatenate(
@@ -389,7 +389,7 @@ def create_test_fp16_class(parent, max_relative_error=1e-2):
self.dtype = self.dtype_ = np.float16
def set_enable_cinn(self):
self.enable_cinn = False
self.enable_cinn = True
def test_check_output(self):
self.check_output()
@@ -308,7 +308,6 @@ class TestFP16DropoutOp(OpTest):
'is_test': True,
}
self.outputs = {'Out': out}
self.enable_cinn = False
# Because prim ops compare results with dygraph:
# when p = 0 the dropout api returns x; in dygraph mode x_grad = out_grad,
# but in static mode x_grad = []
@@ -1689,23 +1688,25 @@ class TestCompositeDropout(unittest.TestCase):
rev_actual = []
paddle.disable_static()
for place in self.places:
if isinstance(place, fluid.CPUPlace):
paddle.set_device("cpu")
if isinstance(place, fluid.CUDAPlace):
paddle.set_device("gpu")
if not isinstance(place, fluid.CUDAPlace):
continue
paddle.set_device("gpu")
paddle.seed(self.seed)
input_ = paddle.to_tensor(
data=self.x, dtype=self.dtype, place=place, stop_gradient=False
)
net = PrimNet()
net = apply_to_static(net, False)
net = apply_to_static(net, True)
output = net(
input_, self.p, training=(not self.is_test), mode=self.mode
)
grad = paddle.grad(output, input_)
fwd_actual.append(output.numpy())
rev_actual.append(grad[0].numpy())
for i in range(len(self.places)):
i = 0
for place in self.places:
if not isinstance(self.places[i], fluid.CUDAPlace):
continue
np.testing.assert_allclose(
self.fwd_desire[i].sum(),
fwd_actual[i].sum(),
@@ -1718,6 +1719,7 @@ class TestCompositeDropout(unittest.TestCase):
rtol=1e-2, # mean of uniform distribution, scale for avoid random failed
atol=0,
)
i += 1
if __name__ == '__main__':
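The p = 0 comment earlier in this file's diff can be checked directly: in dygraph mode, dropout with p = 0 is the identity, so the gradient of the output w.r.t. the input is all ones (x_grad == out_grad). A quick sketch, assuming a working paddle install:

```python
# Quick check of the p = 0 behaviour described in the comment above
# (assumes paddle is installed and eager/dygraph mode).
import numpy as np
import paddle

paddle.disable_static()
x = paddle.rand([3, 4])
x.stop_gradient = False

out = paddle.nn.functional.dropout(x, p=0.0, training=True)
(x_grad,) = paddle.grad(out, x)

np.testing.assert_allclose(out.numpy(), x.numpy())           # identity when p = 0
np.testing.assert_allclose(x_grad.numpy(), np.ones([3, 4]))  # grad is all ones
```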
@@ -497,9 +497,6 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
self.y = np.random.rand(100, 1).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 100, 1)
def if_enable_cinn(self):
self.enable_cinn = False
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast."
@@ -36,7 +36,7 @@ class TestElementwiseOp(OpTest):
self.init_data()
self.op_type = "elementwise_max"
self.prim_op_type = "prim"
self.enable_cinn = False
self.if_enbale_cinn()
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.inputs = {'X': self.x, 'Y': self.y}
@@ -95,6 +95,9 @@ class TestElementwiseOp(OpTest):
check_prim=True,
)
def if_enbale_cinn(self):
pass
class TestElementwiseFP16Op(TestElementwiseOp):
def init_data(self):
@@ -108,7 +111,7 @@ class TestElementwiseFP16Op(TestElementwiseOp):
self.init_data()
self.op_type = "elementwise_max"
self.prim_op_type = "prim"
self.enable_cinn = False
self.if_enbale_cinn()
self.python_api = paddle.maximum
self.dtype = np.float16
self.public_python_api = paddle.maximum
@@ -121,36 +124,54 @@ class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
self.x = np.random.uniform(0.1, 1, []).astype("float64")
self.y = np.random.uniform(0.1, 1, []).astype("float64")
def if_enbale_cinn(self):
self.enable_cinn = False
class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseFP16Op):
def init_data(self):
self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
def if_enbale_cinn(self):
self.enable_cinn = False
class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
def init_data(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
self.y = np.random.uniform(0.1, 1, []).astype("float64")
def if_enbale_cinn(self):
self.enable_cinn = False
class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseFP16Op):
def init_data(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
def if_enbale_cinn(self):
self.enable_cinn = False
class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
def init_data(self):
self.x = np.random.uniform(0.1, 1, []).astype("float64")
self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
def if_enbale_cinn(self):
self.enable_cinn = False
class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseFP16Op):
def init_data(self):
self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
def if_enbale_cinn(self):
self.enable_cinn = False
@unittest.skipIf(
core.is_compiled_with_cuda()
@@ -141,7 +141,6 @@ class TestGatherNdOpWithLowIndex(OpTest):
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.enable_cinn = False
self.config_dtype()
if self.dtype == np.float64:
target_dtype = "float64"
@@ -216,6 +215,7 @@ class TestGatherNdOpIndex1(OpTest):
output = convert_float_to_uint16(output)
self.inputs = {'X': xnp, 'Index': index}
self.outputs = {'Out': output}
# the outputs are 0-D tensors, which CINN does not support
self.enable_cinn = False
def config_dtype(self):
@@ -258,7 +258,6 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.enable_cinn = False
self.config_dtype()
if self.dtype == np.float64:
target_dtype = "float64"
@@ -1217,6 +1217,8 @@ class TestCompositeGroupNorm(unittest.TestCase):
fwd_actual = []
rev_actual = []
for place in self.places:
if not isinstance(place, fluid.CUDAPlace):
continue
input_ = paddle.to_tensor(
data=self.x, dtype=self.dtype, place=place, stop_gradient=False
)
@@ -1241,13 +1243,16 @@ class TestCompositeGroupNorm(unittest.TestCase):
self.data_format,
)
# failed in cinn test
net = apply_to_static(net, False)
net = apply_to_static(net, True)
output = net(input_)
grad = paddle.grad(output, input_)
fwd_actual.append(output.numpy())
rev_actual.append(grad[0].numpy())
for i in range(len(self.places)):
i = 0
for place in self.places:
if not isinstance(place, fluid.CUDAPlace):
continue
atol = self.threshold_list[i][2]
rtol = self.threshold_list[i][2]
np.testing.assert_allclose(
@@ -1269,6 +1274,7 @@ class TestCompositeGroupNorm(unittest.TestCase):
atol=atol,
err_msg='%s jit_cinn rev' % self.places[i],
)
i += 1
if __name__ == '__main__':
@@ -176,7 +176,7 @@ class TestReduceMeanOp(OpTest):
pass
def if_enable_cinn(self):
self.enable_cinn = False
pass
def test_check_output(self):
if self.dtype != 'float16':
@@ -196,7 +196,6 @@ class TestSplitOp_unk_section(OpTest):
self.python_out_sig = ['out0', 'out1', 'out2']
self._set_op_type()
self.prim_op_type = "prim"
self.enable_cinn = False
self.dtype = self.get_dtype()
self.init_data()
self.inputs = {'X': self.x}
@@ -73,7 +73,6 @@ class TestSqueezeOp2(TestSqueezeOp):
self.prim_op_type = "comp"
self.python_api = paddle.squeeze
self.public_python_api = paddle.squeeze
self.enable_cinn = False
self.python_out_sig = [
"Out"
] # python_out_sig is the customized output signature.
@@ -125,7 +125,6 @@ class TestCase10(TestTransposeOp):
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.enable_cinn = False
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {
'axis': list(self.axis),