Unverified commit c6b7b2ad, authored by Charles-hit, committed by GitHub

support some prim ops zero dim part2 (#54907)

Parent 87f72107
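Every hunk in this commit applies the same refactoring: a test's hard-coded input shape is moved into an overridable init_* hook, and a new *_ZeroDim subclass overrides only that hook with an empty shape (() or []) so the prim op is also exercised on 0-D (scalar) tensors. A minimal sketch of the pattern with illustrative names (the real tests derive from OpTest / eager_op_test.OpTest):

import numpy as np


class TestSomeOp:  # stand-in for an OpTest subclass
    def setUp(self):
        self.init_shape()  # shape now comes from a hook ...
        x = np.random.random(size=self.shape).astype('float64')
        self.inputs = {'X': x}

    def init_shape(self):
        self.shape = (100, 10)  # ... instead of a hard-coded size


class TestSomeOp_ZeroDim(TestSomeOp):
    def init_shape(self):
        self.shape = ()  # 0-D input: empty shape tuple, ndim == 0

A 0-D NumPy array still holds a single value, so the expected outputs are computed exactly as before, just on scalar-shaped data.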
@@ -32,10 +32,14 @@ class TestAssignOp(eager_op_test.OpTest):
         self.public_python_api = paddle.assign
         self.op_type = "assign"
         self.prim_op_type = "prim"
-        x = np.random.random(size=(100, 10)).astype('float64')
+        self.init_input_configs()
+        x = np.random.random(size=self.shape).astype('float64')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
+    def init_input_configs(self):
+        self.shape = (100, 10)
+
     def test_forward(self):
         paddle.enable_static()
         self.check_output()
@@ -47,6 +51,11 @@ class TestAssignOp(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+class TestAssignOp_ZeroDim(TestAssignOp):
+    def init_input_configs(self):
+        self.shape = ()
+
+
 @unittest.skipIf(
     not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
 )
......
@@ -30,12 +30,15 @@ class TestErfOp(OpTest):
         self.public_python_api = paddle.erf
         self.python_api = paddle.erf
         self.dtype = self._init_dtype()
-        self.x_shape = [11, 17]
+        self.init_shape()
         x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
         y_ref = erf(x).astype(self.dtype)
         self.inputs = {'X': x}
         self.outputs = {'Out': y_ref}
 
+    def init_shape(self):
+        self.x_shape = [11, 17]
+
     def _init_dtype(self):
         return "float64"
@@ -46,6 +49,11 @@ class TestErfOp(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestErfOp_ZeroDim(TestErfOp):
+    def init_shape(self):
+        self.x_shape = []
+
+
 class TestErfLayer(unittest.TestCase):
     def _test_case(self, place):
         x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
......
@@ -54,6 +54,31 @@ class TestExpandAsBasic(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestExpandAs_ZeroDim1(TestExpandAsBasic):
+    def init_inputs_and_outputs(self):
+        x = np.random.random(()).astype(self.dtype)
+        target_tensor = np.random.random(1).astype(self.dtype)
+        self.inputs = {'X': x, "Y": target_tensor}
+        self.attrs = {'target_shape': target_tensor.shape}
+        bcast_dims = [1]
+        output = np.tile(self.inputs['X'], bcast_dims)
+        self.outputs = {'Out': output}
+
+
+class TestExpandAs_ZeroDim2(TestExpandAsBasic):
+    def init_inputs_and_outputs(self):
+        x = np.random.random(()).astype(self.dtype)
+        target_tensor = np.random.random(()).astype(self.dtype)
+        self.inputs = {'X': x, "Y": target_tensor}
+        self.attrs = {'target_shape': target_tensor.shape}
+        bcast_dims = []
+        output = np.tile(self.inputs['X'], bcast_dims)
+        self.outputs = {'Out': output}
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
......
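The two expand_as zero-dim cases above differ only in bcast_dims because of how np.tile treats a 0-D array: reps=[1] promotes it to a length-1 vector, while reps=[] leaves it 0-D. A quick NumPy check (not part of the diff):

import numpy as np

x = np.random.random(())  # 0-D input, shape ()
assert np.tile(x, [1]).shape == (1,)  # ZeroDim1: 0-D expanded against a (1,) target
assert np.tile(x, []).shape == ()  # ZeroDim2: 0-D target, output stays 0-D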
@@ -36,20 +36,43 @@ class TestExpandV2OpRank1(OpTest):
         self.attrs = {'shape': self.shape}
         output = np.tile(self.inputs['X'], self.expand_times)
         self.outputs = {'Out': output}
-        self.enable_cinn = True
+        self.if_enable_cinn()
 
     def init_data(self):
         self.ori_shape = [100]
         self.shape = [100]
         self.expand_times = [1]
 
+    def if_enable_cinn(self):
+        pass
+
     def test_check_output(self):
-        self.check_output(check_cinn=self.enable_cinn)
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestExpandV2OpRank1_ZeroDim1(TestExpandV2OpRank1):
+    def init_data(self):
+        self.ori_shape = []
+        self.shape = [10]
+        self.expand_times = [10]
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
+class TestExpandV2OpRank1_ZeroDim2(TestExpandV2OpRank1):
+    def init_data(self):
+        self.ori_shape = []
+        self.shape = []
+        self.expand_times = []
+
+    def if_enable_cinn(self):
+        pass
+
+
 class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
     def init_data(self):
         self.ori_shape = [120]
......
@@ -44,6 +44,13 @@ class TestFlattenOp(OpTest):
         self.attrs = {"axis": self.axis}
 
 
+class TestFlattenOp_ZeroDim(TestFlattenOp):
+    def init_test_case(self):
+        self.in_shape = ()
+        self.axis = 0
+        self.new_shape = 1
+
+
 class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 5, 4)
......
@@ -142,6 +142,13 @@ class TestFullLikeOp1(OpTest):
         pass
 
 
+class TestFullLikeOp1_ZeroDim(TestFullLikeOp1):
+    def init_data(self):
+        self.fill_value = 5
+        self.shape = []
+        self.dtype = np.float32
+
+
 class TestFullLikeOp2(TestFullLikeOp1):
     def init_data(self):
         self.fill_value = 1000
......
@@ -122,6 +122,33 @@ class TestGatherNdOpWithIndex1(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestGatherNdOpWithIndex1_ZeroDim(TestGatherNdOpWithIndex1):
+    def setUp(self):
+        self.op_type = "gather_nd"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.gather_nd
+        self.public_python_api = paddle.gather_nd
+        self.config_dtype()
+        self.if_enable_cinn()
+        if self.dtype == np.float64:
+            target_dtype = "float64"
+        elif self.dtype == np.float16:
+            target_dtype = "float16"
+        else:
+            target_dtype = "float32"
+        xnp = np.random.random((100,)).astype(target_dtype)
+        index = np.array([1]).astype("int32")
+        output = xnp[index[-1]]
+        if self.dtype == np.uint16:
+            xnp = convert_float_to_uint16(xnp)
+            output = convert_float_to_uint16(output)
+        self.inputs = {'X': xnp, 'Index': index}
+        self.outputs = {'Out': output}
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
 class TestGatherNdOpWithIndex1FP16(TestGatherNdOpWithIndex1):
     def config_dtype(self):
         self.dtype = np.float16
......
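In the gather_nd zero-dim case above, the reference output is built by plain NumPy indexing with the last index element, which yields a scalar (0-D) result:

import numpy as np

xnp = np.random.random((100,)).astype("float32")
index = np.array([1]).astype("int32")
out = xnp[index[-1]]  # pick a single element -> 0-D result
assert np.asarray(out).shape == ()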
@@ -279,15 +279,18 @@ class TestMaxOp_ZeroDim(OpTest):
         self.python_api = paddle.max
         self.public_python_api = paddle.max
         self.if_enable_cinn()
+        self.init_inputs_and_outputs()
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+    def init_inputs_and_outputs(self):
         self.inputs = {'X': np.random.random([]).astype("float64")}
         self.attrs = {'dim': []}
         self.outputs = {
             'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
         }
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
     def test_check_output(self):
         self.check_output()
@@ -301,6 +304,20 @@ class TestMaxOp_ZeroDim(OpTest):
         )
 
 
+class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim):
+    def init_inputs_and_outputs(self):
+        self.inputs = {'X': np.random.random([5]).astype("float64")}
+        self.attrs = {'dim': [0]}
+        self.outputs = {'Out': self.inputs['X'].max(axis=(0,))}
+
+
+class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1):
+    def init_inputs_and_outputs(self):
+        self.inputs = {'X': np.random.random([5, 20]).astype("float64")}
+        self.attrs = {'dim': [0, 1]}
+        self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))}
+
+
 class TestMaxFP32Op(OpTest):
     """Remove Max with subgradient from gradient check to confirm the success of CI."""
......
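Several of the new zero-dim cases also add an if_enable_cinn() hook that sets self.enable_cinn = False. The diffs only show the hook being defined, so the sketch below is an assumption about how it is wired up, not the actual eager_op_test harness: presumably the OpTest machinery consults enable_cinn when deciding whether to run the CINN-compiled check.

class TestOpBasic:  # illustrative stand-in for an OpTest subclass
    def setUp(self):
        self.if_enable_cinn()  # hook runs while the test is being set up

    def if_enable_cinn(self):
        pass  # base test keeps the harness default


class TestOp_ZeroDim(TestOpBasic):
    def if_enable_cinn(self):
        self.enable_cinn = False  # 0-D variant opts out of CINN checking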