Unverified commit 70eaf9de authored by Wilber, committed by GitHub

update (#51297)

Parent 7c98abd9
@@ -25,15 +25,31 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_
 class TestSumOp(OpTest):
     def setUp(self):
+        self.init_dtype()
+        self.init_input()
+        self.init_attrs()
+        self.calc_output()
         self.python_api = paddle.sum
         self.public_python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.prim_op_type = "prim"
-        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        self.attrs = {'dim': [0]}
+        self.inputs = {'X': self.x}
+        self.outputs = {'Out': self.out}
+        self.enable_cinn = True
+
+    def init_dtype(self):
+        self.dtype = np.float64
+
+    def init_input(self):
+        self.x = np.random.random((5, 6, 10)).astype(self.dtype)
+
+    def init_attrs(self):
+        self.attrs = {'dim': [0]}
+
+    def calc_output(self):
+        self.out = self.x.sum(axis=tuple(self.attrs['dim']))

     def test_check_output(self):
         self.check_output()
@@ -41,51 +57,42 @@ class TestSumOp(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)

-class TestSumOpFp32(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.inputs = {
-            'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
-        }
-        self.attrs = {'dim': [0, 1, 2]}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
-        self.gradient = self.calc_gradient()
-        self.enable_cinn = True
+class TestSumOp_ZeroDim(TestSumOp):
+    def init_attrs(self):
+        self.attrs = {'dim': [], 'reduce_all': True}

-    def test_check_output(self):
-        self.check_output()
+    def init_input(self):
+        self.x = np.random.random([]).astype(self.dtype)

-    def calc_gradient(self):
-        x = self.inputs["X"]
-        grad = np.ones(x.shape, dtype=x.dtype)
-        return (grad,)
+    def calc_output(self):
+        self.out = self.x.sum(axis=None)

     def test_check_grad(self):
-        self.check_grad(
-            ['X'],
-            'Out',
-            user_defined_grads=self.gradient,
-            check_prim=True,
-        )
+        self.check_grad(['X'], 'Out')

-class TestSumOp_ZeroDim(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.inputs = {'X': np.random.random([]).astype("float64")}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
-        self.attrs = {'dim': [], 'reduce_all': True}
-        # reduce doesn't support float64 in cinn.
-        # 0-D tensor doesn't support in cinn
-        self.enable_cinn = False
+class TestSumOp5D(TestSumOp):
+    def init_input(self):
+        self.x = np.random.random((1, 2, 5, 6, 10)).astype(self.dtype)
+
+    def init_attrs(self):
+        self.attrs = {'dim': [0]}
+
+class TestSumOp6D(TestSumOp):
+    def init_input(self):
+        self.x = np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype)
+
+    def init_attrs(self):
+        self.attrs = {'dim': [0]}
+
+class TestSumOp8D(TestSumOp):
+    def init_input(self):
+        self.x = np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype)
+
+    def init_attrs(self):
+        self.attrs = {'dim': (0, 3)}

     def test_check_output(self):
         self.check_output()
@@ -94,64 +101,38 @@ class TestSumOp_ZeroDim(OpTest):
         self.check_grad(['X'], 'Out')

-@unittest.skipIf(
-    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
-)
-class TestSumOp_bf16(OpTest):
-    def setUp(self):
-        np.random.seed(100)
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.dtype = np.uint16
-        self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
-        self.attrs = {'dim': [0, 1, 2]}
-        self.out = self.x.sum(axis=tuple(self.attrs['dim']))
-        self.gradient = self.calc_gradient()
+class TestSumOp_withInt(TestSumOp):
+    def init_input(self):
+        # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
+        # Precision limitations on integer values between 0 and 2048 can be exactly represented
+        self.x = np.random.randint(0, 30, (10, 10)).astype(self.dtype)

-        self.inputs = {'X': convert_float_to_uint16(self.x)}
-        self.outputs = {'Out': convert_float_to_uint16(self.out)}
-        self.gradient = self.calc_gradient()
-        self.enable_cinn = False
+    def init_attrs(self):
+        self.attrs = {'dim': (0, 1)}

     def test_check_output(self):
-        place = core.CUDAPlace(0)
-        self.check_output_with_place(place, atol=0.1)
+        self.check_output()

-    def calc_gradient(self):
-        x = self.inputs["X"]
-        grad = np.ones(x.shape, dtype=x.dtype)
-        return (grad,)

     def test_check_grad(self):
-        place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place,
+        self.check_grad(
             ['X'],
             'Out',
-            user_defined_grads=self.gradient,
+            user_defined_grads=self.calc_gradient(),
             check_prim=True,
         )

+    def calc_gradient(self):
+        x = self.x
+        grad = np.ones(x.shape, dtype=x.dtype)
+        return [grad]
+
+class TestSumOp3Dim(TestSumOp):
+    def init_input(self):
+        self.x = np.random.uniform(0, 0.1, (5, 6, 10)).astype(self.dtype)

-class TestSumOp_fp16_withInt(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.inputs = {
-            # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
-            # Precision limitations on integer values between 0 and 2048 can be exactly represented
-            'X': np.random.randint(0, 30, (10, 10)).astype("float16")
-        }
-        self.attrs = {'dim': [0, 1]}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
-        self.gradient = self.calc_gradient()
-        self.enable_cinn = True
+    def init_attrs(self):
+        self.attrs = {'dim': (0, 1, 2)}

     def test_check_output(self):
         self.check_output()
@@ -165,66 +146,79 @@ class TestSumOp_fp16_withInt(OpTest):
         self.check_grad(
             ['X'],
             'Out',
-            user_defined_grads=self.gradient,
+            user_defined_grads=self.calc_gradient(),
             check_prim=True,
         )

-class TestSumOp5D(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.inputs = {
-            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
-        }
-        self.attrs = {'dim': [0]}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        # error occurred in cinn
-        self.enable_cinn = True
+def create_test_fp16_class(parent):
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+    )
+    class TestSumOpFp16(parent):
+        def init_dtype(self):
+            self.dtype = np.float16

-    def test_check_output(self):
-        self.check_output()
+        def test_check_output(self):
+            self.check_output()

-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
+        def test_check_grad(self):
+            self.check_grad(
+                ['X'],
+                'Out',
+                check_prim=True,
+            )

-class TestSumOp6D(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.public_python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.prim_op_type = "prim"
-        self.inputs = {
-            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
-        }
-        self.attrs = {'dim': [0]}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+create_test_fp16_class(TestSumOp)
+create_test_fp16_class(TestSumOp_ZeroDim)
+create_test_fp16_class(TestSumOp5D)
+create_test_fp16_class(TestSumOp6D)
+create_test_fp16_class(TestSumOp8D)
+create_test_fp16_class(TestSumOp_withInt)
+create_test_fp16_class(TestSumOp3Dim)

-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
+def create_test_bf16_class(parent):
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+    )
+    class TestSumOpBf16(parent):
+        def setUp(self):
+            self.inputs = {'X': convert_float_to_uint16(self.x)}
+            self.outputs = {'Out': convert_float_to_uint16(self.out)}
+            self.enable_cinn = False
+
+        def init_dtype(self):
+            self.dtype = np.uint16

-class TestSumOp8D(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.inputs = {
-            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
-        }
-        self.attrs = {'dim': (0, 3)}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
+        def test_check_output(self):
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place)

-    def test_check_output(self):
-        self.check_output()
+        def test_check_grad(self):
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place,
+                ['X'],
+                'Out',
+                user_defined_grads=self.gradient,
+                check_prim=True,
+            )

-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        def calc_gradient(self):
+            x = self.x
+            grad = np.ones(x.shape, dtype=x.dtype)
+            return [grad]
+
+create_test_bf16_class(TestSumOp)
+create_test_bf16_class(TestSumOp_ZeroDim)
+create_test_bf16_class(TestSumOp5D)
+create_test_bf16_class(TestSumOp6D)
+create_test_bf16_class(TestSumOp8D)
+create_test_bf16_class(TestSumOp_withInt)
+create_test_bf16_class(TestSumOp3Dim)

 @skip_check_grad_ci(
......
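The half-precision comment carried through this diff is easy to verify: float16 has an 11-bit significand (10 stored bits plus an implicit leading bit), so every integer up to 2048 is exactly representable, while 2049 is not. A minimal NumPy check (illustrative only, not part of the commit):

```python
import numpy as np

# 11-bit significand => integers in [0, 2048] are exact in float16.
assert np.float16(2048) == 2048
# 2049 needs 12 significant bits; it rounds to the nearest even value, 2048.
assert np.float16(2049) == 2048
# TestSumOp_withInt stays well inside the exact range by drawing from [0, 30).
x = np.random.randint(0, 30, (10, 10)).astype(np.float16)
assert (x.astype(np.int64) == x).all()  # no rounding occurred
```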
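For readers unfamiliar with the pattern, the `create_test_fp16_class`/`create_test_bf16_class` helpers generate one dtype-specialized subclass per base test, replacing the hand-written fp16/bf16 classes the commit deletes. A standalone sketch of the same idea, with hypothetical names and plain `unittest` (the real helpers additionally override the check methods):

```python
import unittest

class TestBase(unittest.TestCase):
    def init_dtype(self):
        self.dtype = "float64"

    def test_dtype_is_set(self):
        self.init_dtype()
        self.assertIn(self.dtype, ("float64", "float16"))

def create_fp16_variant(parent):
    # Subclass overrides only the dtype hook; all tests are inherited.
    class Fp16Variant(parent):
        def init_dtype(self):
            self.dtype = "float16"

    # Register under a unique module-level name so test discovery finds it.
    Fp16Variant.__name__ = parent.__name__ + "Fp16"
    globals()[Fp16Variant.__name__] = Fp16Variant

create_fp16_variant(TestBase)
```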