Unverified commit ce482b60, authored by Charles-hit, committed by GitHub

Norm prim test for some ops (#51329)

* support elementwise_pow bfloat16

* add only_check_prim parameter in check_grad

* modify unit test

* fix floor test

* fix sigmoid bfloat16 test

* norm some ops prim test

* add uint16 for sqrt
Parent c9e6c8ce
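For context, here is a minimal, hypothetical sketch (not part of this commit) of how an OpTest subclass wires up the prim-related flags that this change normalizes. The flag names (prim_op_type, check_prim, only_check_prim) are taken from the diffs below; the in-tree import path and the rest of the boilerplate are assumptions.

import unittest

import numpy as np
import paddle

from op_test import OpTest  # in-tree import used inside Paddle's unittest directory (assumed)


class TestExpPrimSketch(OpTest):
    # Hypothetical example mirroring the test pattern this commit normalizes.
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"  # route the gradient check through the prim (composite) graph
        self.python_api = paddle.exp
        x = np.random.uniform(0.1, 1.0, [11, 17]).astype(np.float32)
        self.inputs = {'X': x}
        self.outputs = {'Out': np.exp(x)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # check_prim=True exercises the prim backward; only_check_prim=True
        # presumably skips the original-kernel gradient check (assumption from the flag name).
        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)


if __name__ == '__main__':
    unittest.main()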
@@ -52,6 +52,7 @@ class TestSqrtOpError(unittest.TestCase):
 class TestActivation(OpTest):
     def setUp(self):
         self.op_type = "exp"
+        self.prim_op_type = "prim"
         self.init_dtype()
         self.init_shape()
         self.init_kernel_type()
@@ -131,20 +132,6 @@ class TestExpFp64_Prim(TestExpFp32_Prim):
         self.dtype = np.float64

-class TestExpFp16_Prim(TestExpFp32_Prim):
-    def init_dtype(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)
-
-    def if_skip_cinn(self):
-        self.enable_cinn = True

 class TestExpPrim_ZeroDim(TestExpFp32_Prim):
     def init_shape(self):
         self.shape = []
@@ -1230,7 +1217,6 @@ class TestSqrtBF16(OpTest):
             'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
         }
         self.outputs = {'Out': convert_float_to_uint16(out)}
-        # TODO(wanghao107): add prim test
         self.enable_cinn = False

     def init_dtype(self):
@@ -1245,7 +1231,9 @@ class TestSqrtBF16(OpTest):
     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
+        self.check_grad_with_place(
+            place, ['X'], 'Out', check_eager=True, check_prim=True
+        )

 class TestRsqrt(TestActivation):
@@ -2259,27 +2247,6 @@ class TestHardSwish_ZeroDim(TestHardSwish):
         self.shape = []

-class TestHardSwishFP16(TestHardSwish):
-    def setUp(self):
-        super().setUp()
-        self.enable_cinn = False
-
-    def if_only_check_prim(self):
-        return True
-
-    def init_dtype(self):
-        self.dtype = np.float16
-
-class TestHardSwish_ZeroDim_FP16(TestHardSwishFP16):
-    def setUp(self):
-        super().setUp()
-        self.enable_cinn = False
-
-    def init_shape(self):
-        self.shape = []

 class TestHardswishAPI(unittest.TestCase):
     # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
     def setUp(self):
@@ -3821,7 +3788,7 @@ def create_test_act_fp16_class(
     globals()[cls_name] = TestActFp16

-create_test_act_fp16_class(TestActivation)
+create_test_act_fp16_class(TestActivation, check_prim=True)
 create_test_act_fp16_class(TestExpm1)
 create_test_act_fp16_class(TestSigmoid, check_prim=True)
 create_test_act_fp16_class(TestSilu, check_prim=True)
@@ -3870,7 +3837,7 @@ create_test_act_fp16_class(TestSoftsign)
 create_test_act_fp16_class(TestThresholdedRelu)
 create_test_act_fp16_class(TestHardSigmoid)
 create_test_act_fp16_class(TestSwish, grad_atol=0.85)
-create_test_act_fp16_class(TestHardSwish)
+create_test_act_fp16_class(TestHardSwish, check_prim=True)
 create_test_act_fp16_class(TestMish, grad_atol=0.9)
......
@@ -205,10 +205,10 @@ class TestSumOp6(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': -1, 'flatten': True}
         self.inputs = {'X': np.random.random((5, 6, 5)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum()}
-        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()
@@ -394,13 +394,13 @@ class TestSumOpExclusiveFP16(OpTest):
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
         self.enable_cinn = False
-        self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"}
-        a = np.random.random((4, 5, 20)).astype("float64")
+        self.attrs = {'axis': 2, "exclusive": True}
+        a = np.random.random((4, 5, 20)).astype("float16")
         self.inputs = {'X': a}
         self.outputs = {
             'Out': np.concatenate(
                 (
-                    np.zeros((4, 5, 1), dtype=np.float64),
+                    np.zeros((4, 5, 1), dtype=np.float16),
                     a[:, :, :-1].cumsum(axis=2),
                 ),
                 axis=2,
@@ -443,13 +443,16 @@ class TestSumOpReverseExclusive(OpTest):
 class BadInputTest(unittest.TestCase):
     def test_error(self):
+        paddle.enable_static()
         with fluid.program_guard(fluid.Program()):

             def test_bad_x():
                 data = [1, 2, 4]
                 result = paddle.cumsum(data, axis=0)

-            self.assertRaises(TypeError, test_bad_x)
+            with self.assertRaises(TypeError):
+                test_bad_x()
+        paddle.disable_static()

 class TestTensorAxis(unittest.TestCase):
@@ -519,6 +522,7 @@ class TestTensorAxis(unittest.TestCase):
 class TestCumSumOpFp16(unittest.TestCase):
     def test_fp16(self):
+        paddle.enable_static()
         x_np = np.random.random((100, 100)).astype('float16')
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data(shape=[100, 100], name='x', dtype='float16')
@@ -531,6 +535,7 @@ class TestCumSumOpFp16(unittest.TestCase):
             exe = paddle.static.Executor(place)
             exe.run(paddle.static.default_startup_program())
             out = exe.run(feed={'x': x_np}, fetch_list=[y1, y2, y3, y4])
+        paddle.disable_static()

 if __name__ == '__main__':
......
@@ -65,7 +65,7 @@ class ElementwiseDivOp(OpTest):
         self.grad_y = grad_y

     def if_skip_cinn(self):
-        self.enable_cinn = False
+        pass

     def init_args(self):
         self.check_dygraph = True
@@ -145,6 +145,9 @@ class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
         self.x_shape = []
         self.y_shape = []

+    def if_skip_cinn(self):
+        self.enable_cinn = False

 class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
     def init_shape(self):
@@ -160,6 +163,9 @@ class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return np.sum(-1 * grad_out * out / y.reshape([1, 1]))

+    def if_skip_cinn(self):
+        self.enable_cinn = False

 class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
     def init_shape(self):
@@ -175,6 +181,9 @@ class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return -1 * grad_out * out / y

+    def if_skip_cinn(self):
+        self.enable_cinn = False

 @unittest.skipIf(
     not core.is_compiled_with_cuda()
@@ -366,8 +375,11 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
     def compute_gradient_x(self, grad_out, y):
         return np.sum(grad_out / y, axis=(0, 1))

+    def if_skip_cinn(self):
+        self.enable_cinn = False

-class TestElementwiseDivOpInt(TestElementwiseDivOpNoPrim):
+class TestElementwiseDivOpInt(ElementwiseDivOp):
     def init_dtype(self):
         self.dtype = np.int32
         self.val_dtype = np.int32
......
@@ -350,7 +350,7 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
         self.dtype = np.float16

     def if_skip_cinn(self):
-        self.enable_cinn = False
+        pass

 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
......
@@ -105,10 +105,10 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         self.infer_expand_shape = [-1]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out')

 class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
......
@@ -25,8 +25,6 @@ import paddle.framework.dtype as dtypes
 def fill_any_like_wrapper(x, value, out_dtype=None, name=None):
     if isinstance(out_dtype, int):
         tmp_dtype = dtypes.dtype(out_dtype)
-    elif out_dtype == np.complex64:
-        raise ValueError("Not supported dtype %s" % out_dtype)
     else:
         tmp_dtype = out_dtype
     return paddle.full_like(x, value, tmp_dtype, name)
@@ -43,7 +41,7 @@ class TestFillAnyLikeOp(OpTest):
         self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
         self.attrs = {'value': self.value}
         self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
-        self.skip_cinn()
+        self.if_skip_cinn()

     def init(self):
         pass
@@ -51,7 +49,7 @@ class TestFillAnyLikeOp(OpTest):
     def test_check_output(self):
         self.check_output(check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         pass
@@ -60,8 +58,8 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
         self.dtype = np.float32
         self.value = 0.0

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 @unittest.skipIf(
@@ -81,13 +79,13 @@ class TestFillAnyLikeOpBfloat16(OpTest):
                 self.value * np.ones_like(self.inputs["X"])
             )
         }
-        self.skip_cinn()
+        self.if_skip_cinn()

     def test_check_output(self):
         place = core.CUDAPlace(0)
         self.check_output_with_place(place, check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         self.enable_cinn = False
@@ -95,24 +93,24 @@ class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
     def init(self):
         self.value = 1.0

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-10

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-100

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 class TestFillAnyLikeOpType(TestFillAnyLikeOp):
@@ -133,18 +131,18 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
             * np.ones_like(self.inputs["X"]).astype(np.float32)
         }
-        self.skip_cinn()
+        self.if_skip_cinn()

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
     def init(self):
         self.dtype = np.float16

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 if __name__ == "__main__":
......
@@ -27,8 +27,6 @@ from paddle.static import Program, program_guard
 def fill_any_like_wrapper(x, value, out_dtype=None, name=None):
     if isinstance(out_dtype, int):
         tmp_dtype = dtypes.dtype(out_dtype)
-    elif out_dtype == np.complex64:
-        raise ValueError("Not supported dtype %s" % out_dtype)
     else:
         tmp_dtype = out_dtype
     return paddle.full_like(x, value, tmp_dtype, name)
@@ -114,7 +112,7 @@ class TestFullLikeOp1(OpTest):
         self.prim_op_type = "comp"
         self.python_api = fill_any_like_wrapper
         self.init_data()
-        self.skip_cinn()
+        self.if_skip_cinn()

         x = np.zeros(self.shape)
         out = np.full_like(x, self.fill_value, self.dtype)
@@ -134,7 +132,7 @@ class TestFullLikeOp1(OpTest):
     def test_check_output(self):
         self.check_output(check_eager=True, check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         pass
@@ -144,8 +142,8 @@ class TestFullLikeOp2(TestFullLikeOp1):
         self.shape = [1024, 1024]
         self.dtype = np.float64

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 class TestFullLikeOp3(TestFullLikeOp1):
@@ -154,8 +152,8 @@ class TestFullLikeOp3(TestFullLikeOp1):
         self.shape = [5000, 5000]
         self.dtype = np.int64

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass

 @unittest.skipIf(
......
@@ -88,23 +88,32 @@ class TestGatherNdOpIndex1(OpTest):
         self.op_type = "gather_nd"
         self.prim_op_type = "prim"
         self.python_api = paddle.gather_nd
-        xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
-        index = np.array([1, 2]).astype("int32")
-        self.inputs = {'X': xnp, 'Index': index}
-        self.outputs = {'Out': xnp[tuple(index.T)]}
+        self.init_input()
+        self.inputs = {'X': self.xnp, 'Index': self.index}
+        self.outputs = {'Out': self.xnp[tuple(self.index.T)]}
+        self.enable_cinn = False
+
+    def init_input(self):
+        self.xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
+        self.index = np.array([1, 2]).astype("int32")

     def test_check_output(self):
         self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)

+class TestGatherNdOpIndex1FP16(TestGatherNdOpIndex1):
+    def init_input(self):
+        self.xnp = np.random.uniform(0, 100, (10, 10)).astype("float16")
+        self.index = np.array([1, 2]).astype("int32")

 class TestGatherNdOpWithSameIndexAsX(OpTest):
     # Index has same rank as X's rank
     def setUp(self):
         self.op_type = "gather_nd"
         self.prim_op_type = "prim"
......
@@ -199,8 +199,6 @@ class TestSumOp6D(OpTest):
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        # error occurred in cinn
-        self.enable_cinn = True

     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -693,7 +691,6 @@ class Test2DReduce0(Test1DReduce):
         self.attrs = {'dim': [0]}
         self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        self.enable_cinn = True

 class Test2DReduce1(Test1DReduce):
@@ -706,7 +703,6 @@ class Test2DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True

 class Test3DReduce0(Test1DReduce):
@@ -719,7 +715,6 @@ class Test3DReduce0(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True

 class Test3DReduce1(Test1DReduce):
@@ -732,7 +727,6 @@ class Test3DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True

 class Test3DReduce2(Test1DReduce):
@@ -745,7 +739,6 @@ class Test3DReduce2(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True

 class Test3DReduce3(Test1DReduce):
@@ -758,7 +751,6 @@ class Test3DReduce3(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True

 class Test8DReduce0(Test1DReduce):
@@ -791,7 +783,6 @@ class TestKeepDimReduce(Test1DReduce):
                 axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
             )
         }
-        self.enable_cinn = True

 class TestKeepDim8DReduce(Test1DReduce):
@@ -871,8 +862,7 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
         self.check_output()

     def test_check_grad(self):
-        # rev_comp error
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)

 class TestReduceSumWithDimOne(OpTest):
@@ -962,16 +952,22 @@ class Test1DReduceWithAxes1(OpTest):
         self.enable_cinn = True

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)

+def reduce_sum_wrapper(
+    x, axis=None, dtype_rename=None, keepdim=False, name=None
+):
+    return paddle.sum(x, axis, "float64", keepdim, name)

 class TestReduceWithDtype(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
-        self.python_api = paddle.sum
+        self.python_api = reduce_sum_wrapper
         self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
@@ -982,22 +978,14 @@ class TestReduceWithDtype(OpTest):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
-        # cinn op_mapper not support in_dtype/out_dtype attr
-        self.enable_cinn = False

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)

-def reduce_sum_wrapper(
-    x, axis=None, dtype_rename=None, keepdim=False, name=None
-):
-    return paddle.sum(x, axis, "float64", keepdim, name)

 class TestReduceWithDtype1(TestReduceWithDtype):
     def setUp(self):
         self.op_type = "reduce_sum"
......
@@ -34,7 +34,6 @@ class TestSliceOp(OpTest):
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
-        self.enable_cinn = True
         self.config()
         self.inputs = {'Input': self.input}
         self.outputs = {'Out': self.out}
@@ -54,7 +53,7 @@ class TestSliceOp(OpTest):
         self.out = self.input[1:3, 0:3, 2:4, :]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -74,7 +73,6 @@ class TestCase1(TestSliceOp):
 class TestCase2(TestSliceOp):
     def config(self):
-        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-3, 0, 2]
         self.ends = [3, 100, -1]
@@ -139,7 +137,7 @@ class TestSliceOp_decs_dim(OpTest):
         self.out = self.input[1, 0:3, 2:4, :]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -465,7 +463,6 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
 )
 class TestFP16(OpTest):
     def setUp(self):
-        self.enable_cinn = True
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
@@ -578,6 +575,7 @@ class TestBF16(OpTest):
     def test_check_output(self):
         self.check_output()

+    # pad not support bfloat16, so we can't test prim.
     def test_check_grad_normal(self):
         self.check_grad(['Input'], 'Out')
......
@@ -919,7 +919,7 @@ def sqrt(x, name=None):
         return _C_ops.sqrt(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sqrt'
         )
         helper = LayerHelper('sqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
......