Unverified commit 09694f82, authored by 姜永久, committed by GitHub

dynamic graph tests (#50572)

* fix

* and others

* more ops

* reset distribute_fpn and precision_recall

* reset fc

* modify arange test

* modify reshape&reduce

* add fill_any and sigmoid_cross_entropy

* reset linear_interp_v2

* reset reduce

* modify

* modify arange

* modify cast
Parent f24eadd9
@@ -15,13 +15,17 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.fluid import core
from paddle.static import Program, program_guard
def arange_wrapper(start, end, step, dtype="float32"):
return paddle.arange(start, end, step, dtype)
class TestArangeOp(OpTest):
def setUp(self):
self.op_type = "range"
@@ -40,6 +44,7 @@ class TestArangeOp(OpTest):
def init_config(self):
self.dtype = np.float32
self.python_api = arange_wrapper
self.case = (0, 1, 0.2)
def test_check_output(self):
@@ -49,30 +54,35 @@ class TestArangeOp(OpTest):
class TestFloatArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.float32
self.python_api = paddle.arange
self.case = (0, 5, 1)
class TestInt32ArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.int32
self.python_api = paddle.arange
self.case = (0, 5, 2)
class TestFloat64ArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.float64
self.python_api = paddle.arange
self.case = (10, 1, -2)
class TestInt64ArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.int64
self.python_api = paddle.arange
self.case = (-1, -10, -2)
class TestZeroSizeArangeOp(TestArangeOp):
def init_config(self):
self.dtype = np.int32
self.python_api = paddle.arange
self.case = (0, 0, 1)
......
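Note: `eager_op_test.OpTest` replays each case through `self.python_api` under the dynamic graph, which is why every subclass now registers one. A minimal sketch of the equivalence this enables, assuming a local Paddle install (tolerance is illustrative, not from the commit):

import numpy as np
import paddle

# Dynamic-graph call, as driven through arange_wrapper / paddle.arange.
out = paddle.arange(0, 1, 0.2, dtype="float32")
# NumPy reference, built the same way the static test's expected output is.
expected = np.arange(0, 1, 0.2).astype("float32")
np.testing.assert_allclose(out.numpy(), expected, rtol=1e-5)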
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
@@ -243,10 +243,15 @@ def bce_loss(input, label):
return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input))
def bce_wrapper(x, label):
return paddle._C_ops.bce_loss(x, label)
class TestBceLossOp(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = "bce_loss"
self.python_api = bce_wrapper
input_np = np.random.uniform(0.1, 0.8, self.shape).astype("float64")
label_np = np.random.randint(0, 2, self.shape).astype("float64")
output_np = bce_loss(input_np, label_np)
......
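Note: `bce_wrapper` drives the internal `paddle._C_ops.bce_loss` binding directly; the public functional API computes the same element-wise loss. A hedged sketch against the `bce_loss()` NumPy reference above (shapes are illustrative):

import numpy as np
import paddle
import paddle.nn.functional as F

x_np = np.random.uniform(0.1, 0.8, (8, 4)).astype("float64")
y_np = np.random.randint(0, 2, (8, 4)).astype("float64")
# Same formula as the bce_loss() reference in this file.
ref = -1 * (y_np * np.log(x_np) + (1.0 - y_np) * np.log(1.0 - x_np))
out = F.binary_cross_entropy(
    paddle.to_tensor(x_np), paddle.to_tensor(y_np), reduction="none"
)
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6)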
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
@@ -26,6 +26,7 @@ paddle.enable_static()
class TestBitwiseAnd(OpTest):
def setUp(self):
self.op_type = "bitwise_and"
self.python_api = paddle.tensor.logic.bitwise_and
self.init_dtype()
self.init_shape()
self.init_bound()
@@ -116,6 +117,8 @@ class TestBitwiseAndInt64(TestBitwiseAnd):
class TestBitwiseAndBool(TestBitwiseAnd):
def setUp(self):
self.op_type = "bitwise_and"
self.python_api = paddle.tensor.logic.bitwise_and
self.init_shape()
x = np.random.choice([True, False], self.x_shape)
@@ -130,6 +133,7 @@ class TestBitwiseAndBool(TestBitwiseAnd):
class TestBitwiseOr(OpTest):
def setUp(self):
self.op_type = "bitwise_or"
self.python_api = paddle.tensor.logic.bitwise_or
self.init_dtype()
self.init_shape()
self.init_bound()
@@ -220,6 +224,8 @@ class TestBitwiseOrInt64(TestBitwiseOr):
class TestBitwiseOrBool(TestBitwiseOr):
def setUp(self):
self.op_type = "bitwise_or"
self.python_api = paddle.tensor.logic.bitwise_or
self.init_shape()
x = np.random.choice([True, False], self.x_shape)
@@ -234,6 +240,8 @@ class TestBitwiseOrBool(TestBitwiseOr):
class TestBitwiseXor(OpTest):
def setUp(self):
self.op_type = "bitwise_xor"
self.python_api = paddle.tensor.logic.bitwise_xor
self.init_dtype()
self.init_shape()
self.init_bound()
@@ -324,6 +332,8 @@ class TestBitwiseXorInt64(TestBitwiseXor):
class TestBitwiseXorBool(TestBitwiseXor):
def setUp(self):
self.op_type = "bitwise_xor"
self.python_api = paddle.tensor.logic.bitwise_xor
self.init_shape()
x = np.random.choice([True, False], self.x_shape)
@@ -338,6 +348,8 @@ class TestBitwiseXorBool(TestBitwiseXor):
class TestBitwiseNot(OpTest):
def setUp(self):
self.op_type = "bitwise_not"
self.python_api = paddle.tensor.logic.bitwise_not
self.init_dtype()
self.init_shape()
self.init_bound()
@@ -408,6 +420,7 @@ class TestBitwiseNotInt64(TestBitwiseNot):
class TestBitwiseNotBool(TestBitwiseNot):
def setUp(self):
self.op_type = "bitwise_not"
self.python_api = paddle.tensor.logic.bitwise_not
self.init_shape()
x = np.random.choice([True, False], self.x_shape)
......
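Note: each bitwise test now points `python_api` at the matching `paddle.tensor.logic` function. The dynamic-graph behaviour being asserted agrees with NumPy's element-wise operators; a minimal sketch (values are illustrative):

import numpy as np
import paddle

x = paddle.to_tensor(np.array([0b1100, 0b1010], dtype=np.int32))
y = paddle.to_tensor(np.array([0b1010, 0b0110], dtype=np.int32))
# paddle.bitwise_and is the same callable as paddle.tensor.logic.bitwise_and.
np.testing.assert_array_equal(
    paddle.bitwise_and(x, y).numpy(), np.bitwise_and(x.numpy(), y.numpy())
)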
@@ -17,7 +17,11 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float
from eager_op_test import (
OpTest,
convert_float_to_uint16,
convert_uint16_to_float,
)
import paddle
import paddle.fluid as fluid
@@ -25,6 +29,35 @@ import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
def convert_to_dtype_(dtype):
if dtype == 5:
return core.VarDesc.VarType.FP32
elif dtype == 6:
return core.VarDesc.VarType.FP64
elif dtype == 4:
return core.VarDesc.VarType.FP16
elif dtype == 2:
return core.VarDesc.VarType.INT32
elif dtype == 1:
return core.VarDesc.VarType.INT16
elif dtype == 3:
return core.VarDesc.VarType.INT64
elif dtype == 0:
return core.VarDesc.VarType.BOOL
elif dtype == 22:
return core.VarDesc.VarType.BF16
elif dtype == 20:
return core.VarDesc.VarType.UINT8
elif dtype == 21:
return core.VarDesc.VarType.INT8
elif dtype == np.complex64:
raise ValueError("Not supported dtype %s" % dtype)
def cast_wrapper(x, out_dtype=None):
return paddle.tensor.cast(x, convert_to_dtype_(out_dtype))
class TestCastOpFp32ToFp64(OpTest):
def setUp(self):
ipt = np.random.random(size=[10, 10])
@@ -35,6 +68,7 @@ class TestCastOpFp32ToFp64(OpTest):
'out_dtype': int(core.VarDesc.VarType.FP64),
}
self.op_type = 'cast'
self.python_api = cast_wrapper
def test_check_output(self):
self.check_output()
@@ -54,6 +88,7 @@ class TestCastOpFp16ToFp32(OpTest):
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.python_api = cast_wrapper
def test_check_output(self):
self.check_output(atol=1e-3)
@@ -70,6 +105,7 @@ class TestCastOpFp32ToFp16(OpTest):
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.python_api = cast_wrapper
def test_check_output(self):
self.check_output(atol=1e-3)
@@ -86,6 +122,7 @@ class TestCastOpBf16ToFp32(OpTest):
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.python_api = cast_wrapper
def test_check_output(self):
self.check_output()
@@ -102,6 +139,7 @@ class TestCastOpFp32ToBf16(OpTest):
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.python_api = cast_wrapper
def test_check_output(self):
self.check_output()
......
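Note: `convert_to_dtype_` exists because the static op stores `out_dtype` as the integer value of the `VarDesc.VarType` enum (5 for FP32, 6 for FP64, and so on), while `paddle.tensor.cast` expects a dtype; the wrapper translates before dispatching. A sketch of the underlying cast behaviour, with illustrative shapes:

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((4, 4)).astype("float32"))
out = paddle.cast(x, "float64")  # the dygraph path cast_wrapper ends up exercising
assert out.dtype == paddle.float64
np.testing.assert_allclose(out.numpy(), x.numpy().astype("float64"))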
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
@@ -67,10 +67,10 @@ class TestChannelShuffleOp(OpTest):
self.format = "NCHW"
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestChannelLast(TestChannelShuffleOp):
......
@@ -16,8 +16,8 @@ import unittest
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, skip_check_grad_ci
from gradient_checker import grad_check
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
@@ -36,6 +36,7 @@ import paddle.fluid.core as core
class TestCholeskyOp(OpTest):
def setUp(self):
self.op_type = "cholesky"
self.python_api = paddle.cholesky
self._input_shape = (2, 32, 32)
self._upper = True
self.init_config()
......
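Note: `paddle.cholesky` can serve as `python_api` unwrapped, since its signature already matches the op. A hedged sketch of the factorization property the test relies on (the matrix below is illustrative):

import numpy as np
import paddle

a = np.random.rand(3, 3).astype("float64")
spd = a @ a.T + 3.0 * np.eye(3)  # symmetric positive definite input
l = paddle.cholesky(paddle.to_tensor(spd), upper=False)
# The lower-triangular factor should reproduce the input: L @ L^T == spd.
np.testing.assert_allclose(
    paddle.matmul(l, l, transpose_y=True).numpy(), spd, rtol=1e-6
)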
@@ -20,7 +20,7 @@ import scipy
import scipy.linalg
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
@@ -121,6 +121,7 @@ class TestCholeskySolveOp(OpTest):
def setUp(self):
self.op_type = "cholesky_solve"
self.python_api = paddle.tensor.cholesky_solve
self.config()
if self.upper:
......
@@ -15,15 +15,23 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
def einsum_wrapper(a, b):
if not isinstance(a, list):
a = [a]
return paddle._C_ops.einsum(a, b)
class TestEinsumBinary(OpTest):
def setUp(self):
paddle.enable_static()
self.op_type = "einsum"
self.python_api = einsum_wrapper
self.python_out_sig = ['Out']
self.disable = False
self.set_mandatory()
self.init_input()
......
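Note: `einsum_wrapper` boxes a lone operand into a list because the `einsum` C op takes a tensor list plus the equation, whereas the public API is variadic. A sketch of the public form, with illustrative shapes:

import numpy as np
import paddle

a = paddle.to_tensor(np.random.rand(3, 4).astype("float32"))
b = paddle.to_tensor(np.random.rand(4, 5).astype("float32"))
# Public variadic API; the test drives paddle._C_ops.einsum(list, equation) instead.
out = paddle.einsum("ij,jk->ik", a, b)
np.testing.assert_allclose(out.numpy(), a.numpy() @ b.numpy(), rtol=1e-5)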
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def bcast(x, target_tensor):
......
@@ -21,15 +21,10 @@ import paddle
import paddle.fluid.core as core
def fill_any_like_wrapper(x, value):
x.fill_(value)
return x
class TestFillAnyLikeOp(OpTest):
def setUp(self):
self.op_type = "fill_any_like"
self.python_api = fill_any_like_wrapper
self.python_api = paddle.full_like
self.dtype = np.int32
self.value = 0.0
self.init()
@@ -56,7 +51,7 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
class TestFillAnyLikeOpBfloat16(OpTest):
def setUp(self):
self.op_type = "fill_any_like"
self.python_api = fill_any_like_wrapper
self.python_api = paddle.full_like
self.dtype = np.uint16
self.value = 0.0
self.inputs = {'X': np.random.random((219, 232)).astype(np.float32)}
@@ -90,7 +85,7 @@ class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
class TestFillAnyLikeOpType(TestFillAnyLikeOp):
def setUp(self):
self.op_type = "fill_any_like"
self.python_api = fill_any_like_wrapper
self.python_api = paddle.full_like
self.dtype = np.int32
self.value = 0.0
self.init()
......
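Note: replacing the hand-rolled in-place `fill_` wrapper with `paddle.full_like` also matches the kernel's semantics better, since `fill_any_like` produces a fresh tensor rather than mutating its input. A minimal sketch:

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((2, 3)).astype("float32"))
out = paddle.full_like(x, 0.0)  # new tensor; x is left untouched
np.testing.assert_array_equal(out.numpy(), np.zeros((2, 3), dtype="float32"))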
@@ -15,14 +15,21 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
def fill_any_wrapper(x, value_float=0, value_int=0):
return paddle._legacy_C_ops.fill_any(
x, "value_float", value_float, "value_int", value_int
)
class TestFillAnyOp(OpTest):
def setUp(self):
self.op_type = "fill_any"
self.python_api = fill_any_wrapper
self.dtype = 'float64'
self.value = 0.0
self.init()
......
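Note: `fill_any`, unlike `fill_any_like`, has no public functional equivalent, so its wrapper goes through `paddle._legacy_C_ops` with attribute-style arguments. The closest public behaviour is the in-place `Tensor.fill_`; a hedged sketch (values illustrative):

import paddle

x = paddle.ones([2, 2])
x.fill_(3.0)  # in-place fill, the dygraph analogue of the fill_any kernel
assert float(x.sum()) == 12.0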
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposals_op import box_coder, clip_tiled_boxes, nms
@@ -223,11 +223,12 @@ class TestGenerateProposalsV2Op(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def setUp(self):
self.op_type = "generate_proposals_v2"
self.python_api = python_generate_proposals_v2
self.python_out_sig = ['Out']
self.set_data()
def init_test_params(self):
......
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
@@ -28,10 +28,17 @@ def huber_loss_forward(val, delta):
return delta * (abs_val - 0.5 * delta)
def huber_loss_wraper(x, y, delta):
a, b = paddle._C_ops.huber_loss(x, y, delta)
return a, b
class TestHuberLossOp(OpTest):
def setUp(self):
self.op_type = 'huber_loss'
self.python_out_sig = ["Out"]
self.python_api = huber_loss_wraper
self.delta = 1.0
self.init_input()
shape = self.set_shape()
@@ -53,10 +60,10 @@ class TestHuberLossOp(OpTest):
return (100, 1)
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=False)
self.check_grad(['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
self.check_grad(
......
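Note: `huber_loss_wraper` returns both kernel outputs (residual and loss), so `python_out_sig = ["Out"]` tells the harness which one to compare. For reference, the piecewise formula that `huber_loss_forward` implements, as a NumPy sketch (the helper name `huber` is hypothetical):

import numpy as np

def huber(val, delta=1.0):
    # 0.5 * r^2 inside the delta band, linear with slope delta outside it.
    abs_val = np.abs(val)
    return np.where(abs_val <= delta, 0.5 * val * val, delta * (abs_val - 0.5 * delta))

print(huber(np.array([-2.0, 0.5, 3.0])))  # [1.5, 0.125, 2.5]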
@@ -24,6 +24,37 @@ from paddle.fluid.op import Operator
paddle.enable_static()
def lamb_wrapper(
param,
grad,
lr,
moment1,
moment2,
beta1Pow,
beta2Pow,
epsilon=1e-8,
beta1=0.9,
beta2=0.999,
weight_decay=0.01,
):
return paddle._C_ops.lamb_(
param,
grad,
lr,
moment1,
moment2,
beta1Pow,
beta2Pow,
None,
None,
weight_decay,
beta1,
beta2,
epsilon,
False,
)
class TestLambOp1(OpTest):
def set_attrs(self):
self.attrs = {
@@ -43,6 +74,10 @@ class TestLambOp1(OpTest):
learning_rate = 0.001
self.set_attrs()
self.python_api = lamb_wrapper
self.python_out_sig = ['Out']
beta1_pow = self.attrs['beta1']
beta2_pow = self.attrs['beta2']
......
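Note: `lamb_wrapper` pins down the long positional signature of the in-place `paddle._C_ops.lamb_` kernel; the two `None` slots are optional inputs the test leaves unused. The public counterpart is the `paddle.optimizer.Lamb` optimizer; an illustrative sketch (layer sizes are arbitrary):

import paddle

linear = paddle.nn.Linear(4, 4)
opt = paddle.optimizer.Lamb(
    learning_rate=0.001, lamb_weight_decay=0.01, parameters=linear.parameters()
)
loss = linear(paddle.rand([2, 4])).mean()
loss.backward()
opt.step()  # one LAMB update over the layer's parameters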
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
@@ -27,6 +27,8 @@ class TestReshapeOp(OpTest):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")}
self.attrs = {"shape": self.new_shape}
self.outputs = {
@@ -71,6 +73,8 @@ class TestReshapeBF16Op(OpTest):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.dtype = np.uint16
x = np.random.random(self.ori_shape).astype("float32")
out = x.reshape(self.infered_shape)
@@ -114,6 +118,8 @@ class TestReshapeOpWithInputShape(OpTest):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {
"X": np.random.random(self.ori_shape).astype("float32"),
@@ -142,6 +148,8 @@ class TestReshapeOp_attr_ShapeTensor(OpTest):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
shape_tensor = []
for index, ele in enumerate(self.new_shape):
@@ -193,6 +201,8 @@ class TestReshapeOp_attr_OnlyShape(OpTest):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {
"X": np.random.random(self.ori_shape).astype("float32"),
@@ -240,6 +250,8 @@ class TestReshapeInt8Op(OpTest):
self.use_mkldnn = True
self._cpu_only = True
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
input = np.random.randint(0, 127, self.ori_shape).astype(self.dtype)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
self.attrs = {
@@ -261,7 +273,9 @@ class TestReshapeInt8Op(OpTest):
def test_check_output(self):
self.check_output_with_place(
fluid.core.CPUPlace(), atol=1e-5, no_check_set=['XShape']
fluid.core.CPUPlace(),
atol=1e-5,
no_check_set=['XShape'],
)
def test_check_grad(self):
@@ -278,6 +292,8 @@ class TestReshapeOpBool(TestReshapeOp):
def setUp(self):
self.init_data()
self.op_type = "reshape2"
self.python_api = paddle.tensor.reshape
self.python_out_sig = ['Out']
self.inputs = {
"X": np.random.choice([True, False], size=self.ori_shape)
}
......
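Note: every `reshape2` test gains both hooks because the static op has two outputs, `Out` and `XShape`, while `paddle.tensor.reshape` returns only the data tensor; `python_out_sig = ['Out']` limits the comparison to the output that exists in both modes. A minimal sketch:

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(6).astype("float32"))
out = paddle.reshape(x, [2, 3])  # dygraph returns Out only; XShape is a static-graph artifact
assert out.shape == [2, 3]
np.testing.assert_array_equal(out.numpy().ravel(), x.numpy())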
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from scipy.special import expit, logit
import paddle
@@ -23,11 +23,19 @@ import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def loss_wrapper(logit, label, normalize=False, ignore_index=-100):
out = paddle._C_ops.sigmoid_cross_entropy_with_logits(
logit, label, normalize, ignore_index
)
return out
class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
"""Test sigmoid_cross_entropy_with_logit_op with binary label"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = 64
num_classes = 20
self.inputs = {
@@ -50,10 +58,10 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
self.outputs = {'Out': -term1 - term2}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
@@ -61,6 +69,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = 64
num_classes = 20
ignore_index = -1
@@ -88,10 +97,10 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
@@ -99,6 +108,7 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = 64
num_classes = 20
self.inputs = {
@@ -121,15 +131,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
self.outputs = {'Out': -term1 - term2}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithNorm(OpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = 64
num_classes = 20
ignore_index = -1
@@ -156,10 +167,10 @@ class TestSigmoidCrossEntropyWithNorm(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
@@ -167,6 +178,7 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = [10, 10]
num_classes = 20
self.inputs = {
@@ -189,15 +201,16 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
self.outputs = {'Out': -term1 - term2}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithNorm2(OpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = [10, 10]
num_classes = 20
ignore_index = -1
@@ -224,16 +237,17 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
"""Test sigmoid_cross_entropy_with_logit_op with binary label"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.python_api = loss_wrapper
batch_size = [10, 10]
num_classes = 20
self.inputs = {
......@@ -256,10 +270,10 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest):
self.outputs = {'Out': -term1 - term2}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out')
class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase):
def test_errors(self):
......
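Note: all variants share `loss_wrapper`, which forwards `normalize` and `ignore_index` positionally to the C op. The public `binary_cross_entropy_with_logits` computes the same element-wise loss; a hedged sketch against the standard numerically stable formula (shapes illustrative):

import numpy as np
import paddle
import paddle.nn.functional as F

logit = paddle.to_tensor(np.random.randn(4, 3).astype("float32"))
label = paddle.to_tensor(np.random.randint(0, 2, (4, 3)).astype("float32"))
x, z = logit.numpy(), label.numpy()
# Stable sigmoid cross entropy: max(x, 0) - x*z + log(1 + exp(-|x|)).
ref = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
out = F.binary_cross_entropy_with_logits(logit, label, reduction="none")
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-5)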
@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
@@ -103,6 +103,8 @@ class TestUnbindOp(OpTest):
self.outputs = {
'Out': [('out%d' % i, self.out[i]) for i in range(len(self.out))]
}
self.python_api = paddle.unbind
self.python_out_sig = ['out%d' % i for i in range(len(self.out))]
def get_dtype(self):
return "float64"
@@ -192,6 +194,7 @@ class TestUnbindBF16Op(OpTest):
for i in range(len(self.out))
]
}
self.python_out_sig = ['out%d' % i for i in range(len(self.out))]
def get_dtype(self):
return np.uint16
......
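Note: `unbind` is multi-output, so `python_out_sig` enumerates `out0 ... outN` to line dynamic-graph results up with the static outputs. A minimal sketch:

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(6).reshape(2, 3).astype("float32"))
outs = paddle.unbind(x, axis=0)  # a list of tensors, matching out0, out1 in the test
np.testing.assert_array_equal(outs[0].numpy(), x.numpy()[0])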