Unverified · Commit bef4e9f7, authored by Charles-hit and committed by GitHub

[Prim Op Test]add public_python_api in prim test (#51829)

* add public_python_api in prim test

* fix test_squeeze2_op
Parent cb1d6b50
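The changes below follow one pattern: each prim-enabled OpTest keeps its existing `python_api` and additionally sets `public_python_api`, which `PrimForwardChecker` now asserts on and hands to `PrimNet`. A minimal sketch of that pattern (the class name, op choice, and shapes are illustrative and not taken from this diff; it assumes it runs inside Paddle's unittest directory so `op_test` is importable):

```python
import numpy as np
import paddle
from op_test import OpTest  # resolvable when run from Paddle's unittest directory


class TestExpPrimSketch(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.dtype = np.float32
        # entry point OpTest itself calls during dygraph/static checks
        self.python_api = paddle.exp
        # public API handed to PrimNet/PrimForwardChecker for prim checking
        self.public_python_api = paddle.exp
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': x}
        self.outputs = {'Out': np.exp(x)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)
```

With only `python_api` set, the checker now fails early with "If you want to check prim, please set public_python_api in setUp function."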
......@@ -221,12 +221,12 @@ def apply_to_static(net, use_cinn):
class PrimNet(paddle.nn.Layer):
def __init__(self, python_api):
def __init__(self, public_python_api):
super().__init__()
self.python_api = python_api
self.public_python_api = public_python_api
def forward(self, args):
out = self.python_api(*args)
out = self.public_python_api(*args)
return out
......@@ -264,7 +264,10 @@ class PrimForwardChecker:
), "Please set dtype in setUp function."
self.op_type = self.op_test.op_type
self.prim_op_type = self.op_test.prim_op_type
self.python_api = self.op_test.python_api
assert hasattr(
self.op_test, 'public_python_api'
), "If you want to check prim, please set public_python_api in setUp function."
self.public_python_api = self.op_test.public_python_api
self.dtype = np.dtype(self.op_test.dtype)
self.inputs = self.op_test.inputs
self.attrs = (
......@@ -432,7 +435,7 @@ class PrimForwardChecker:
_,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -441,7 +444,7 @@ class PrimForwardChecker:
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
ret = flatten(_as_list(self.python_api(*args)))
ret = flatten(_as_list(self.public_python_api(*args)))
ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
if OpTestUtils.is_bfloat16_type(self.dtype):
ret = paddle.utils.map_structure(
......@@ -579,7 +582,7 @@ class PrimForwardChecker:
stop_gradient=True
)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
static_inputs,
attrs,
self.kernel_sig,
......@@ -588,7 +591,7 @@ class PrimForwardChecker:
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
ret = flatten(_as_list(self.python_api(*args)))
ret = flatten(_as_list(self.public_python_api(*args)))
primapi.to_prim(main_program.blocks)
exe = paddle.static.Executor(self.place)
exe.run(startup_program)
......@@ -650,7 +653,7 @@ class PrimForwardChecker:
_,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -659,7 +662,7 @@ class PrimForwardChecker:
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
net = PrimNet(self.python_api)
net = PrimNet(self.public_python_api)
net = apply_to_static(net, False)
ret = flatten(_as_list(net(args)))
ret = paddle.utils.map_structure(lambda x: x.numpy(), ret)
......@@ -732,7 +735,7 @@ class PrimForwardChecker:
_,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -741,7 +744,7 @@ class PrimForwardChecker:
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
net = PrimNet(self.python_api)
net = PrimNet(self.public_python_api)
net = apply_to_static(
net, core.is_compiled_with_cinn() and self.enable_cinn
)
......@@ -895,7 +898,7 @@ class PrimGradChecker(PrimForwardChecker):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -906,7 +909,7 @@ class PrimGradChecker(PrimForwardChecker):
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
ret = _as_list(self.python_api(*args))
ret = _as_list(self.public_python_api(*args))
outputs_dict = self.get_output_dict(self.outputs, ret, outputs_sig)
ys = []
if isinstance(self.output_names, list):
......@@ -1004,7 +1007,7 @@ class PrimGradChecker(PrimForwardChecker):
stop_gradient=False
)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
static_inputs,
attrs,
self.kernel_sig,
......@@ -1015,7 +1018,7 @@ class PrimGradChecker(PrimForwardChecker):
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
fw_outs = _as_list(self.python_api(*args))
fw_outs = _as_list(self.public_python_api(*args))
outputs_dict = self.get_output_dict(
self.outputs, fw_outs, outputs_sig
)
......@@ -1110,7 +1113,7 @@ class PrimGradChecker(PrimForwardChecker):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -1119,7 +1122,7 @@ class PrimGradChecker(PrimForwardChecker):
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
net = PrimNet(self.python_api)
net = PrimNet(self.public_python_api)
net = apply_to_static(net, False)
out = _as_list(net(args))
if hasattr(self.op_test, "python_out_sig"):
......@@ -1225,7 +1228,7 @@ class PrimGradChecker(PrimForwardChecker):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
args = OpTestUtils.prepare_python_api_arguments(
self.python_api,
self.public_python_api,
eager_tensor_inputs,
attrs_outputs,
self.kernel_sig,
......@@ -1234,7 +1237,7 @@ class PrimGradChecker(PrimForwardChecker):
args = OpTestUtils.assumption_assert_and_transform(
args, len(inputs_sig)
)
net = PrimNet(self.python_api)
net = PrimNet(self.public_python_api)
net = apply_to_static(
net, core.is_compiled_with_cinn() and self.enable_cinn
)
......
......@@ -56,6 +56,7 @@ class TestActivation(OpTest):
self.init_shape()
self.init_kernel_type()
self.python_api = paddle.exp
self.public_python_api = paddle.exp
np.random.seed(2049)
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
......@@ -97,6 +98,7 @@ class TestExpFp32_Prim(OpTest):
self.init_dtype()
self.init_shape()
self.python_api = paddle.exp
self.public_python_api = paddle.exp
np.random.seed(2049)
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
......@@ -236,6 +238,7 @@ class TestSigmoid(TestActivation):
self.prim_op_type = "comp"
self.enable_cinn = False
self.python_api = paddle.nn.functional.sigmoid
self.public_python_api = paddle.nn.functional.sigmoid
self.init_dtype()
self.init_shape()
......@@ -269,6 +272,7 @@ class TestSigmoidBF16(OpTest):
self.prim_op_type = "comp"
self.enable_cinn = False
self.python_api = paddle.nn.functional.sigmoid
self.public_python_api = paddle.nn.functional.sigmoid
self.init_dtype()
self.init_shape()
np.random.seed(1024)
......@@ -310,6 +314,7 @@ class TestSilu(TestActivation):
self.prim_op_type = "comp"
self.enable_cinn = True
self.python_api = paddle.nn.functional.silu
self.public_python_api = paddle.nn.functional.silu
self.init_dtype()
self.init_shape()
self.if_enable_cinn()
......@@ -1131,6 +1136,8 @@ class TestSqrt(TestActivation, TestParameter):
self.op_type = "sqrt"
self.prim_op_type = "prim"
self.python_api = paddle.sqrt
self.public_python_api = paddle.sqrt
self.init_dtype()
self.init_shape()
......@@ -1157,6 +1164,7 @@ class TestSqrtPrimFp32(TestActivation):
self.op_type = "sqrt"
self.prim_op_type = "prim"
self.python_api = paddle.sqrt
self.public_python_api = paddle.sqrt
self.init_dtype()
self.init_shape()
np.random.seed(1023)
......@@ -1207,6 +1215,7 @@ class TestSqrtBF16(OpTest):
self.op_type = "sqrt"
self.prim_op_type = "prim"
self.python_api = paddle.sqrt
self.public_python_api = paddle.sqrt
self.init_dtype()
self.init_shape()
......@@ -1240,6 +1249,7 @@ class TestSqrtComp(TestActivation, TestParameter):
self.op_type = "sqrt"
self.prim_op_type = "comp"
self.python_api = paddle.sqrt
self.public_python_api = paddle.sqrt
self.init_dtype()
self.init_shape()
......@@ -1265,6 +1275,7 @@ class TestSqrtCompFp32(TestActivation):
self.op_type = "sqrt"
self.prim_op_type = "comp"
self.python_api = paddle.sqrt
self.public_python_api = paddle.sqrt
self.init_dtype()
self.init_shape()
np.random.seed(1023)
......@@ -1292,6 +1303,7 @@ class TestRsqrt(TestActivation):
self.op_type = "rsqrt"
self.prim_op_type = "comp"
self.python_api = paddle.rsqrt
self.public_python_api = paddle.rsqrt
self.init_dtype()
self.init_shape()
......@@ -1333,6 +1345,7 @@ class TestAbs(TestActivation):
self.op_type = "abs"
self.prim_op_type = "prim"
self.python_api = paddle.abs
self.public_python_api = paddle.abs
self.enable_cinn = False
self.init_dtype()
self.init_shape()
......@@ -1395,6 +1408,7 @@ class TestFloor(TestActivation):
self.op_type = "floor"
self.prim_op_type = "prim"
self.python_api = paddle.floor
self.public_python_api = paddle.floor
self.init_dtype()
self.init_shape()
......@@ -1425,6 +1439,7 @@ class TestFloor_Prim(TestActivation):
self.op_type = "floor"
self.prim_op_type = "prim"
self.python_api = paddle.floor
self.public_python_api = paddle.floor
self.init_dtype()
self.init_shape()
......@@ -1765,6 +1780,7 @@ class TestRelu(TestActivation):
self.op_type = "relu"
self.python_api = paddle.nn.functional.relu
self.prim_op_type = "comp"
self.public_python_api = paddle.nn.functional.relu
self.init_dtype()
self.init_shape()
self.skip_cinn()
......@@ -1996,6 +2012,7 @@ class TestGeluApproximate(TestActivation):
self.op_type = "gelu"
self.prim_op_type = "comp"
self.python_api = paddle.nn.functional.gelu
self.public_python_api = paddle.nn.functional.gelu
self.init_dtype()
self.init_shape()
approximate = True
......@@ -2022,6 +2039,7 @@ class TestGelu(TestActivation):
self.op_type = "gelu"
self.prim_op_type = "comp"
self.python_api = paddle.nn.functional.gelu
self.public_python_api = paddle.nn.functional.gelu
self.init_dtype()
self.init_shape()
approximate = False
......@@ -2280,6 +2298,7 @@ class TestHardSwish(TestActivation):
self.init_shape()
self.prim_op_type = "comp"
self.python_api = paddle.nn.functional.hardswish
self.public_python_api = paddle.nn.functional.hardswish
np.random.seed(1024)
x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
......@@ -2666,6 +2685,7 @@ class TestLog(TestActivation):
self.op_type = "log"
self.prim_op_type = "prim"
self.python_api = paddle.log
self.public_python_api = paddle.log
self.init_dtype()
self.init_shape()
......@@ -2980,6 +3000,7 @@ class TestPow(TestActivation):
self.op_type = "pow"
self.prim_op_type = "comp"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.init_dtype()
self.init_shape()
......
......@@ -29,6 +29,7 @@ from paddle.fluid.backward import append_backward
class TestAssignOp(op_test.OpTest):
def setUp(self):
self.python_api = paddle.assign
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
self.enable_cinn = False
......@@ -50,6 +51,7 @@ class TestAssignOp(op_test.OpTest):
class TestAssignFP16Op(op_test.OpTest):
def setUp(self):
self.python_api = paddle.assign
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
self.enable_cinn = False
......
......@@ -17,21 +17,18 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.tests.unittests.op_test import (
OpTest,
convert_float_to_uint16,
skip_check_grad_ci,
)
class TestConcatOp(OpTest):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.public_python_api = paddle.concat
self.prim_op_type = "prim"
self.enable_cinn = False
self.dtype = self.get_dtype()
......@@ -135,6 +132,7 @@ class TestConcatOp6(TestConcatOp):
self.op_type = "concat"
self.dtype = self.get_dtype()
self.python_api = paddle.concat
self.public_python_api = paddle.concat
self.prim_op_type = "prim"
self.enable_cinn = False
self.init_test_data()
......@@ -175,6 +173,7 @@ class TestConcatOp7(TestConcatOp):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.public_python_api = paddle.concat
self.prim_op_type = "prim"
self.enable_cinn = True
self.dtype = self.get_dtype()
......@@ -224,6 +223,7 @@ def create_test_AxisTensor(parent):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.public_python_api = paddle.concat
self.dtype = self.get_dtype()
self.init_test_data()
......
......@@ -117,6 +117,7 @@ class TestSumOp1(OpTest):
self.op_type = "cumsum"
self.prim_op_type = "prim"
self.python_api = paddle.cumsum
self.public_python_api = paddle.cumsum
self.set_enable_cinn()
self.init_dtype()
self.set_attrs_input_output()
......@@ -215,6 +216,7 @@ class TestSumOpExclusive1(OpTest):
self.op_type = "cumsum"
self.prim_op_type = "prim"
self.python_api = paddle.cumsum
self.public_python_api = paddle.cumsum
self.set_enable_cinn()
self.init_dtype()
self.set_attrs_input_output()
......@@ -306,6 +308,7 @@ class TestSumOpExclusiveFP16(OpTest):
self.op_type = "cumsum"
self.prim_op_type = "prim"
self.python_api = paddle.cumsum
self.public_python_api = paddle.cumsum
self.init_dtype()
self.enable_cinn = False
self.attrs = {'axis': 2, "exclusive": True}
......@@ -339,6 +342,7 @@ class TestSumOpReverseExclusive(OpTest):
self.op_type = "cumsum"
self.prim_op_type = "prim"
self.python_api = paddle.cumsum
self.public_python_api = paddle.cumsum
self.set_enable_cinn()
self.init_dtype()
self.attrs = {
......
......@@ -32,6 +32,7 @@ class TestElementwiseAddOp(OpTest):
def setUp(self):
self.op_type = "elementwise_add"
self.python_api = paddle.add
self.public_python_api = paddle.add
self.prim_op_type = "prim"
self.init_dtype()
self.init_input_output()
......@@ -163,6 +164,7 @@ class TestBF16ElementwiseAddOp(OpTest):
def setUp(self):
self.op_type = "elementwise_add"
self.python_api = paddle.add
self.public_python_api = paddle.add
self.prim_op_type = "prim"
self.dtype = np.uint16
......
......@@ -33,6 +33,7 @@ class ElementwiseDivOp(OpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.python_api = paddle.divide
self.public_python_api = paddle.divide
self.prim_op_type = "prim"
self.init_args()
self.init_dtype()
......
......@@ -38,6 +38,7 @@ class TestElementwiseOp(OpTest):
self.prim_op_type = "prim"
self.enable_cinn = False
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.inputs = {'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
......@@ -163,6 +164,7 @@ class TestElementwiseBF16Op(OpTest):
self.init_data()
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
self.enable_cinn = False
self.dtype = np.uint16
......@@ -273,6 +275,7 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
......@@ -293,6 +296,7 @@ class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
......@@ -313,6 +317,7 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
......@@ -333,6 +338,7 @@ class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
......@@ -353,6 +359,7 @@ class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
......@@ -372,6 +379,7 @@ class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
......@@ -391,6 +399,7 @@ class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
......@@ -411,6 +420,7 @@ class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
......@@ -431,6 +441,7 @@ class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
......@@ -444,6 +455,7 @@ class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.public_python_api = paddle.maximum
self.prim_op_type = "prim"
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
......
......@@ -29,6 +29,7 @@ class ElementwiseMulOp(OpTest):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.dtype = np.float64
self.axis = -1
self.init_dtype()
......@@ -127,6 +128,7 @@ class TestBF16ElementwiseMulOp(OpTest):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.dtype = np.uint16
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
......@@ -171,6 +173,7 @@ class TestElementwiseMulOp_scalar(ElementwiseMulOp):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float64),
'Y': np.random.rand(1).astype(np.float64),
......@@ -184,6 +187,7 @@ class TestElementwiseMulOp_Vector(ElementwiseMulOp):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.inputs = {
'X': np.random.random((100,)).astype("float64"),
'Y': np.random.random((100,)).astype("float64"),
......@@ -200,6 +204,7 @@ class ElementwiseMulOp_broadcast(OpTest):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.init_dtype()
self.init_kernel_type()
self.init_axis()
......@@ -358,6 +363,7 @@ class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float64),
'Y': np.random.rand(1, 1, 100).astype(np.float64),
......@@ -371,6 +377,7 @@ class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.inputs = {
'X': np.random.rand(30, 3, 1, 5).astype(np.float64),
'Y': np.random.rand(30, 1, 4, 1).astype(np.float64),
......@@ -384,6 +391,7 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
self.op_type = "elementwise_mul"
self.prim_op_type = "prim"
self.python_api = paddle.multiply
self.public_python_api = paddle.multiply
self.inputs = {
'X': np.random.rand(10, 10).astype(np.float64),
'Y': np.random.rand(2, 2, 10, 10).astype(np.float64),
......
......@@ -31,6 +31,7 @@ class TestElementwisePowOp(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.uniform(1, 2, [20, 5]).astype("float64"),
......@@ -59,6 +60,7 @@ class TestElementwisePowOp_ZeroDim1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.enable_cinn = False
self.prim_op_type = "prim"
......@@ -73,6 +75,7 @@ class TestElementwisePowOp_ZeroDim2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.enable_cinn = False
self.prim_op_type = "prim"
......@@ -87,6 +90,7 @@ class TestElementwisePowOp_ZeroDim3(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.enable_cinn = False
self.prim_op_type = "prim"
......@@ -101,6 +105,7 @@ class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -114,6 +119,7 @@ class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -130,6 +136,7 @@ class TestElementwisePowOp_scalar(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -143,7 +150,7 @@ class TestElementwisePowOp_tensor(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -157,6 +164,7 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -237,6 +245,7 @@ class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......@@ -300,6 +309,7 @@ class TestElementwisePowOpFP16(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.python_api = paddle.pow
self.public_python_api = paddle.pow
self.prim_op_type = "prim"
self.inputs = {
......
......@@ -28,6 +28,7 @@ class TestElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
......@@ -72,6 +73,7 @@ class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.uniform(0.1, 1, []).astype("float64"),
......@@ -92,6 +94,7 @@ class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
......@@ -112,6 +115,7 @@ class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.uniform(0.1, 1, []).astype("float64"),
......@@ -132,6 +136,7 @@ class TestBF16ElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.dtype = np.uint16
x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
......@@ -176,6 +181,7 @@ class TestElementwiseSubOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float64),
......@@ -189,6 +195,7 @@ class TestElementwiseSubOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.random((100,)).astype("float64"),
......@@ -256,6 +263,7 @@ class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float64),
......@@ -290,6 +298,7 @@ class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(np.float64),
......@@ -306,6 +315,7 @@ class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float64),
......@@ -322,6 +332,7 @@ class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(np.float64),
......@@ -338,6 +349,7 @@ class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.inputs = {
'X': np.random.rand(10, 12).astype(np.float64),
......@@ -358,6 +370,7 @@ class TestComplexElementwiseSubOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.python_api = paddle.subtract
self.public_python_api = paddle.subtract
self.prim_op_type = "prim"
self.dtype = np.float64
self.shape = (2, 3, 4, 5)
......
......@@ -27,7 +27,7 @@ class TestErfOp(OpTest):
def setUp(self):
self.op_type = "erf"
self.prim_op_type = "prim"
self.enable_cinn = True
self.public_python_api = paddle.erf
self.python_api = paddle.erf
self.dtype = self._init_dtype()
self.x_shape = [11, 17]
......
......@@ -26,6 +26,7 @@ class TestExpandAsBasic(OpTest):
self.op_type = "expand_as_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand_as
self.public_python_api = paddle.expand_as
x = np.random.rand(100).astype("float64")
target_tensor = np.random.rand(2, 100).astype("float64")
self.inputs = {'X': x, "Y": target_tensor}
......@@ -46,6 +47,7 @@ class TestExpandAsOpRank2(TestExpandAsBasic):
self.op_type = "expand_as_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand_as
self.public_python_api = paddle.expand_as
x = np.random.rand(10, 12).astype("float64")
target_tensor = np.random.rand(10, 12).astype("float64")
self.inputs = {'X': x, "Y": target_tensor}
......@@ -60,6 +62,7 @@ class TestExpandAsOpRank3(TestExpandAsBasic):
self.op_type = "expand_as_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand_as
self.public_python_api = paddle.expand_as
x = np.random.rand(2, 3, 20).astype("float64")
target_tensor = np.random.rand(2, 3, 20).astype("float64")
self.inputs = {'X': x, "Y": target_tensor}
......@@ -74,6 +77,7 @@ class TestExpandAsOpRank4(TestExpandAsBasic):
self.op_type = "expand_as_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand_as
self.public_python_api = paddle.expand_as
x = np.random.rand(1, 1, 7, 16).astype("float64")
target_tensor = np.random.rand(4, 6, 7, 16).astype("float64")
self.inputs = {'X': x, "Y": target_tensor}
......@@ -90,6 +94,7 @@ class TestExpandAsOpRank5(TestExpandAsBasic):
self.op_type = "expand_as_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand_as
self.public_python_api = paddle.expand_as
x = np.random.rand(1, 1, 7, 16).astype("int64")
target_tensor = np.random.rand(4, 6, 7, 16).astype("float64")
self.inputs = {'X': x, "Y": target_tensor}
......
......@@ -31,6 +31,7 @@ class TestExpandV2OpRank1(OpTest):
self.prim_op_type = "prim"
self.init_data()
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
self.attrs = {'shape': self.shape}
output = np.tile(self.inputs['X'], self.expand_times)
......@@ -83,6 +84,7 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "prim"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.init_data()
expand_shapes_tensor = []
for index, ele in enumerate(self.expand_shape):
......@@ -125,6 +127,7 @@ class TestExpandV2OpRank1_tensor(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "prim"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.init_data()
self.inputs = {
......@@ -153,6 +156,7 @@ class TestExpandV2OpInteger(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "prim"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {
'X': np.random.randint(10, size=(2, 4, 5)).astype("int32")
}
......@@ -170,6 +174,7 @@ class TestExpandV2OpBoolean(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "prim"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
self.attrs = {'shape': [2, 4, 5]}
output = np.tile(self.inputs['X'], (1, 1, 1))
......@@ -185,6 +190,7 @@ class TestExpandV2OpInt64_t(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "prim"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {
'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
}
......@@ -339,6 +345,7 @@ class TestExpandV2CompOpRank1(OpTest):
self.prim_op_type = "comp"
self.init_data()
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
self.attrs = {'shape': self.shape}
output = np.tile(self.inputs['X'], self.expand_times)
......@@ -391,6 +398,7 @@ class TestExpandV2CompOpInteger(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {
'X': np.random.randint(10, size=(2, 4, 5)).astype("int32")
}
......@@ -408,6 +416,7 @@ class TestExpandV2CompOpBoolean(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
self.attrs = {'shape': [2, 4, 5]}
output = np.tile(self.inputs['X'], (1, 1, 1))
......@@ -423,6 +432,7 @@ class TestExpandV2CompOpInt64_t(OpTest):
self.op_type = "expand_v2"
self.prim_op_type = "comp"
self.python_api = paddle.expand
self.public_python_api = paddle.expand
self.inputs = {
'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
}
......
......@@ -35,6 +35,7 @@ class TestFillAnyLikeOp(OpTest):
self.op_type = "fill_any_like"
self.prim_op_type = "comp"
self.python_api = fill_any_like_wrapper
self.public_python_api = fill_any_like_wrapper
self.dtype = np.int32
self.value = 0.0
self.init()
......@@ -70,6 +71,7 @@ class TestFillAnyLikeOpBfloat16(OpTest):
self.op_type = "fill_any_like"
self.prim_op_type = "comp"
self.python_api = fill_any_like_wrapper
self.public_python_api = fill_any_like_wrapper
self.dtype = np.uint16
self.value = 0.0
self.inputs = {'X': np.random.random((219, 232)).astype(np.float32)}
......@@ -118,6 +120,7 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
self.op_type = "fill_any_like"
self.prim_op_type = "comp"
self.python_api = fill_any_like_wrapper
self.public_python_api = fill_any_like_wrapper
self.dtype = np.int32
self.value = 0.0
self.init()
......
......@@ -23,6 +23,7 @@ import paddle
class TestFlattenOp(OpTest):
def setUp(self):
self.python_api = paddle.flatten
self.public_python_api = paddle.flatten
self.python_out_sig = ["Out"]
self.op_type = "flatten_contiguous_range"
self.prim_op_type = "comp"
......
......@@ -111,6 +111,7 @@ class TestFullLikeOp1(OpTest):
self.op_type = "fill_any_like"
self.prim_op_type = "comp"
self.python_api = fill_any_like_wrapper
self.public_python_api = fill_any_like_wrapper
self.init_data()
self.if_enable_cinn()
......
......@@ -28,6 +28,7 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([[], []]).astype("int32")}
self.outputs = {
......@@ -46,6 +47,7 @@ class TestGatherNdOpWithIndex1(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
xnp = np.random.random((5, 20)).astype("float64")
self.inputs = {'X': xnp, 'Index': np.array([1]).astype("int32")}
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
......@@ -64,6 +66,7 @@ class TestGatherNdOpWithLowIndex(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.enable_cinn = False
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1], [2]]).astype("int64")
......@@ -88,6 +91,7 @@ class TestGatherNdOpIndex1(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.init_input()
self.inputs = {'X': self.xnp, 'Index': self.index}
......@@ -118,6 +122,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.enable_cinn = False
xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
index = np.array([[1, 1], [2, 1]]).astype("int64")
......@@ -139,6 +144,7 @@ class TestGatherNdOpWithHighRankSame(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
shape = (5, 2, 3, 1, 10)
xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=2) for s in shape]).T
......@@ -160,6 +166,7 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
shape = (2, 3, 4, 1, 10)
xnp = np.random.rand(*shape).astype("float64")
index = np.vstack([np.random.randint(0, s, size=200) for s in shape]).T
......
......@@ -34,6 +34,7 @@ class TestGatherOp(OpTest):
def setUp(self):
self.op_type = "gather"
self.python_api = paddle.gather
self.public_python_api = paddle.gather
self.config()
self.prim_op_type = "prim"
xnp = np.random.random(self.x_shape).astype(self.x_type)
......
......@@ -27,6 +27,7 @@ np.random.seed(1024)
class TestIndexSelectOp(OpTest):
def setUp(self):
self.python_api = paddle.index_select
self.public_python_api = paddle.index_select
self.op_type = "index_select"
self.prim_op_type = "comp"
self.init_dtype_type()
......
......@@ -150,6 +150,7 @@ class TestReduceMeanOp(OpTest):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.public_python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
......@@ -204,6 +205,7 @@ class TestReduceMeanBF16Op(OpTest):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.public_python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = np.uint16
self.shape = [2, 3, 4, 5]
......@@ -244,6 +246,7 @@ class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
def setUp(self):
self.op_type = 'reduce_mean'
self.python_api = reduce_mean_wrapper
self.public_python_api = reduce_mean_wrapper
self.prim_op_type = "comp"
self.dtype = 'float64'
self.shape = [2, 3, 4, 5]
......
......@@ -27,6 +27,7 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_
class TestSumOp(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
......@@ -44,6 +45,7 @@ class TestSumOp(OpTest):
class TestSumOpFp32(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {
......@@ -77,6 +79,7 @@ class TestSumOpFp32(OpTest):
class TestSumOp_ZeroDim(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random([]).astype("float64")}
......@@ -100,6 +103,7 @@ class TestSumOp_bf16(OpTest):
def setUp(self):
np.random.seed(100)
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.dtype = np.uint16
......@@ -137,6 +141,7 @@ class TestSumOp_bf16(OpTest):
class TestSumOp_fp16_withInt(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {
......@@ -172,6 +177,7 @@ class TestSumOp_fp16_withInt(OpTest):
class TestSumOp5D(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {
......@@ -192,6 +198,7 @@ class TestSumOp5D(OpTest):
class TestSumOp6D(OpTest):
def setUp(self):
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.inputs = {
......@@ -671,6 +678,7 @@ class Test1DReduce(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random(120).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
......@@ -687,6 +695,7 @@ class Test2DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [0]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
......@@ -697,6 +706,7 @@ class Test2DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
......@@ -709,6 +719,7 @@ class Test3DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
......@@ -721,6 +732,7 @@ class Test3DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
......@@ -733,6 +745,7 @@ class Test3DReduce2(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [-2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
......@@ -745,6 +758,7 @@ class Test3DReduce3(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.attrs = {'dim': [1, 2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
......@@ -775,6 +789,7 @@ class TestKeepDimReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
......@@ -849,6 +864,7 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1], 'keep_dim': True}
......@@ -869,6 +885,7 @@ class TestReduceSumWithDimOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'dim': [1, 2], 'keep_dim': True}
......@@ -890,6 +907,7 @@ class TestReduceSumWithNumelOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': False}
......@@ -911,6 +929,7 @@ class TestReduceAll(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'reduce_all': True, 'keep_dim': False}
......@@ -928,6 +947,7 @@ class TestReduceAllFp32(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
self.attrs = {'reduce_all': True, 'keep_dim': False}
......@@ -945,6 +965,7 @@ class Test1DReduceWithAxes1(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = paddle.sum
self.public_python_api = paddle.sum
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random(100).astype("float64")}
self.attrs = {'dim': [0], 'keep_dim': False}
......@@ -958,9 +979,7 @@ class Test1DReduceWithAxes1(OpTest):
self.check_grad(['X'], 'Out', check_prim=True)
def reduce_sum_wrapper(
x, axis=None, dtype_rename=None, keepdim=False, name=None
):
def reduce_sum_wrapper(x, axis=None, out_dtype=None, keepdim=False, name=None):
return paddle.sum(x, axis, "float64", keepdim, name)
......@@ -968,6 +987,7 @@ class TestReduceWithDtype(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper
self.public_python_api = reduce_sum_wrapper
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
......@@ -990,6 +1010,7 @@ class TestReduceWithDtype1(TestReduceWithDtype):
def setUp(self):
self.op_type = "reduce_sum"
self.python_api = reduce_sum_wrapper
self.public_python_api = reduce_sum_wrapper
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
......@@ -1015,6 +1036,7 @@ class TestReduceWithDtype2(TestReduceWithDtype):
self.op_type = "reduce_sum"
self.prim_op_type = "prim"
self.python_api = reduce_sum_wrapper
self.public_python_api = reduce_sum_wrapper
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
self.attrs = {'dim': [1], 'keep_dim': True}
......
......@@ -29,6 +29,7 @@ class TestReshapeOp(OpTest):
self.op_type = "reshape2"
self.prim_op_type = "prim"
self.python_api = paddle.tensor.reshape
self.public_python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
self.attrs = {"shape": self.new_shape}
......@@ -56,6 +57,7 @@ class TestReshapeOp_ZeroDim1(TestReshapeOp):
self.prim_op_type = "prim"
self.enable_cinn = False
self.python_api = paddle.tensor.reshape
self.public_python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
self.attrs = {"shape": self.new_shape}
......@@ -91,6 +93,7 @@ class TestReshapeBF16Op(OpTest):
self.prim_op_type = "prim"
self.enable_cinn = False
self.python_api = paddle.tensor.reshape
self.public_python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.dtype = np.uint16
x = np.random.random(self.ori_shape).astype("float32")
......
......@@ -34,6 +34,7 @@ class TestSliceOp(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
......@@ -86,6 +87,7 @@ class TestSliceZerosShapeTensor(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
......@@ -116,6 +118,7 @@ class TestSliceOp_decs_dim(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
......@@ -466,6 +469,7 @@ class TestFP16(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
......@@ -510,6 +514,7 @@ class TestFP16_2(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
......@@ -551,6 +556,7 @@ class TestBF16(OpTest):
self.op_type = "slice"
self.prim_op_type = "prim"
self.python_api = paddle.slice
self.public_python_api = paddle.slice
self.config()
self.inputs = {'Input': convert_float_to_uint16(self.input)}
self.outputs = {'Out': convert_float_to_uint16(self.out)}
......
......@@ -54,6 +54,7 @@ class TestSoftmaxOp(OpTest):
self.op_type = "softmax"
self.prim_op_type = "comp"
self.python_api = F.softmax
self.public_python_api = F.softmax
self.use_cudnn = False
self.use_mkldnn = False
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
......@@ -118,6 +119,7 @@ class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
self.op_type = "softmax"
self.prim_op_type = "comp"
self.python_api = F.softmax
self.public_python_api = F.softmax
self.use_cudnn = False
self.use_mkldnn = False
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
......
......@@ -25,6 +25,7 @@ from paddle.fluid import Program, core, program_guard
class TestSplitOp(OpTest):
def setUp(self):
self.python_api = paddle.split
self.public_python_api = paddle.split
self.python_out_sig = ['out0', 'out1', 'out2']
self._set_op_type()
self.prim_op_type = "prim"
......@@ -67,6 +68,7 @@ class TestSplitOp(OpTest):
class TestSplitOp_2(OpTest):
def setUp(self):
self.python_api = paddle.split
self.public_python_api = paddle.split
self.python_out_sig = ['out0', 'out1', 'out2']
self._set_op_type()
self.prim_op_type = "prim"
......@@ -190,6 +192,7 @@ class TestSplitOp_SectionsTensor(OpTest):
class TestSplitOp_unk_section(OpTest):
def setUp(self):
self.python_api = paddle.split
self.public_python_api = paddle.split
self.python_out_sig = ['out0', 'out1', 'out2']
self._set_op_type()
self.prim_op_type = "prim"
......
......@@ -31,6 +31,7 @@ class TestSqueezeOp(OpTest):
self.op_type = "squeeze2"
self.prim_op_type = "comp"
self.python_api = paddle.squeeze
self.public_python_api = paddle.squeeze
self.python_out_sig = [
"Out"
] # python out sig is customized output signature.
......@@ -73,6 +74,7 @@ class TestSqueezeOp2(TestSqueezeOp):
self.op_type = "squeeze2"
self.prim_op_type = "comp"
self.python_api = paddle.squeeze
self.public_python_api = paddle.squeeze
self.enable_cinn = False
self.python_out_sig = [
"Out"
......
......@@ -46,6 +46,7 @@ class TestStackOpBase(OpTest):
self.op_type = 'stack'
self.prim_op_type = "comp"
self.python_api = paddle.stack
self.public_python_api = paddle.stack
self.x = []
for i in range(self.num_inputs):
self.x.append(
......@@ -129,6 +130,7 @@ class TestStackBF16Op(OpTest):
self.prim_op_type = "comp"
self.enable_cinn = False
self.python_api = paddle.stack
self.public_python_api = paddle.stack
self.x = []
for i in range(self.num_inputs):
self.x.append(
......
......@@ -47,6 +47,7 @@ class TestTopkOp(OpTest):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float64
self.input_data = np.random.rand(10, 20)
self.init_args()
......@@ -88,6 +89,7 @@ class TestTopkOp3(TestTopkOp):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float64
self.input_data = np.random.rand(16, 100)
self.init_args()
......@@ -109,6 +111,7 @@ class TestTopkOp4(TestTopkOp):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float64
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
......@@ -130,6 +133,7 @@ class TestTopkOp5(TestTopkOp):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float64
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
......@@ -151,6 +155,7 @@ class TestTopkOp6(TestTopkOp):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
......@@ -172,6 +177,7 @@ class TestTopkOp7(TestTopkOp):
self.op_type = "top_k_v2"
self.prim_op_type = "prim"
self.python_api = paddle.topk
self.public_python_api = paddle.topk
self.dtype = np.float16
self.input_data = np.random.rand(10, 20, 10)
self.init_args()
......
......@@ -32,6 +32,7 @@ class TestTransposeOp(OpTest):
self.init_op_type()
self.initTestCase()
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {
......@@ -123,6 +124,7 @@ class TestCase10(TestTransposeOp):
self.init_op_type()
self.initTestCase()
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.enable_cinn = False
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
......@@ -145,6 +147,7 @@ class TestCase_ZeroDim(TestTransposeOp):
self.init_op_type()
self.initTestCase()
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.enable_cinn = False
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
......@@ -167,6 +170,7 @@ class TestAutoTuneTransposeOp(OpTest):
self.init_op_type()
self.initTestCase()
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.inputs = {'X': np.random.random(self.shape).astype("float64")}
self.attrs = {
......@@ -203,6 +207,7 @@ class TestAutoTuneTransposeBF16Op(OpTest):
self.initTestCase()
self.dtype = np.uint16
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
self.prim_op_type = "prim"
self.enable_cinn = False
x = np.random.random(self.shape).astype("float32")
......@@ -245,6 +250,7 @@ class TestTransposeBF16Op(OpTest):
self.prim_op_type = "prim"
self.enable_cinn = False
self.python_api = paddle.transpose
self.public_python_api = paddle.transpose
x = np.random.random(self.shape).astype("float32")
self.inputs = {'X': convert_float_to_uint16(x)}
......
......@@ -28,6 +28,7 @@ class TestUnsqueezeOp(OpTest):
self.init_test_case()
self.op_type = "unsqueeze2"
self.python_api = paddle.unsqueeze
self.public_python_api = paddle.unsqueeze
self.python_out_sig = ["Out"]
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
......