Unverified commit b0ee022b, authored by 姜永久, committed by GitHub

migrating dot/sign/fill/norm from old dynamic graph to new dynamic graph (#49895)

* check dygraph on for op tests

* reset eigh and modify prelu&sign

* update eager_op_test

* lint

* add more ops

* fix reduce

* modify reduce test

* reset reduce_op

* modify matmul test

* revert prelu
Parent 47ddd36e
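The pattern is the same in every file below: the test imports OpTest from eager_op_test instead of op_test, drops the per-call check_eager flag (the new harness checks the dynamic-graph path by default), and sets self.python_api to the eager API, or to a small wrapper, that mirrors the operator. A condensed before/after sketch of the pattern (the class name and shapes here are illustrative, not part of the diff):

    import numpy as np
    import paddle
    from eager_op_test import OpTest  # was: from op_test import OpTest


    class TestDotOpSketch(OpTest):
        def setUp(self):
            self.op_type = "dot"
            self.python_api = paddle.dot  # eager API matching the op
            x = np.random.random(12).astype("float64")
            y = np.random.random(12).astype("float64")
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {'Out': np.dot(x, y)}

        def test_check_output(self):
            self.check_output()  # was: self.check_output(check_eager=True)

        def test_check_grad(self):
            self.check_grad(['X', 'Y'], 'Out')  # check_eager dropped here too

Checks that must skip the dynamic graph entirely (e.g. the oneDNN bfloat16 fill_constant test below) opt out explicitly with check_dygraph=False.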
python/paddle/fluid/tests/unittests/test_dot_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -38,7 +38,7 @@ class DotOp(OpTest):
         self.attrs = {}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         if core.is_compiled_with_rocm():
@@ -46,10 +46,12 @@ class DotOp(OpTest):
                 ['X', 'Y'],
                 'Out',
                 user_defined_grads=[self.inputs['Y'], self.inputs['X']],
-                check_eager=True,
             )
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(
+                ['X', 'Y'],
+                'Out',
+            )

     def test_check_grad_ingore_x(self):
         if core.is_compiled_with_rocm():
@@ -58,11 +60,12 @@ class DotOp(OpTest):
                 'Out',
                 no_grad_set=set("X"),
                 user_defined_grads=[self.inputs['X']],
-                check_eager=True,
             )
         else:
             self.check_grad(
-                ['Y'], 'Out', no_grad_set=set("X"), check_eager=True
+                ['Y'],
+                'Out',
+                no_grad_set=set("X"),
             )

     def test_check_grad_ingore_y(self):
@@ -72,11 +75,12 @@ class DotOp(OpTest):
                 'Out',
                 no_grad_set=set('Y'),
                 user_defined_grads=[self.inputs['Y']],
-                check_eager=True,
             )
         else:
             self.check_grad(
-                ['X'], 'Out', no_grad_set=set('Y'), check_eager=True
+                ['X'],
+                'Out',
+                no_grad_set=set('Y'),
             )

     def init_input_output(self):
@@ -187,7 +191,7 @@ class TestComplexDotOp(OpTest):
         self.grad_y = self.grad_out * np.conj(self.x)

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -195,7 +199,6 @@ class TestComplexDotOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )

     def test_check_grad_ingore_x(self):
@@ -205,7 +208,6 @@ class TestComplexDotOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )

     def test_check_grad_ingore_y(self):
@@ -215,13 +217,13 @@ class TestComplexDotOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=True,
         )


 class TestComplexDotOp2D(OpTest):
     def setUp(self):
         self.op_type = "dot"
+        self.python_api = paddle.dot
         self.init_base_dtype()
         self.init_input_output()
         self.init_grad_input_output()
......
python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
@@ -15,15 +15,21 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid.core as core


+def fill_any_like_wrapper(x, value):
+    x.fill_(value)
+    return x
+
+
 class TestFillAnyLikeOp(OpTest):
     def setUp(self):
         self.op_type = "fill_any_like"
+        self.python_api = fill_any_like_wrapper
         self.dtype = np.int32
         self.value = 0.0
         self.init()
@@ -50,6 +56,7 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
 class TestFillAnyLikeOpBfloat16(OpTest):
     def setUp(self):
         self.op_type = "fill_any_like"
+        self.python_api = fill_any_like_wrapper
         self.dtype = np.uint16
         self.value = 0.0
         self.inputs = {'X': np.random.random((219, 232)).astype(np.float32)}
@@ -83,6 +90,7 @@ class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
 class TestFillAnyLikeOpType(TestFillAnyLikeOp):
     def setUp(self):
         self.op_type = "fill_any_like"
+        self.python_api = fill_any_like_wrapper
         self.dtype = np.int32
         self.value = 0.0
         self.init()
......
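When no public eager API matches the operator's calling convention one-to-one, the commit points python_api at a small wrapper instead: fill_any_like has no functional counterpart taking (x, value), so the wrapper above reuses the in-place Tensor.fill_. Where a functional API does line up (F.log_loss, paddle.logspace below), it is assigned directly. A sketch of the wrapper's behavior when called the way the harness presumably calls it, with the op's input X and value attribute as arguments:

    import paddle

    def fill_any_like_wrapper(x, value):
        x.fill_(value)  # in-place fill, mirroring the fill_any_like kernel
        return x

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    out = fill_any_like_wrapper(x, 0.0)
    print(out.numpy())  # [[0. 0.] [0. 0.]] -- shape and dtype preserved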
python/paddle/fluid/tests/unittests/test_fill_constant_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 import paddle.fluid as fluid
@@ -24,11 +24,17 @@ from paddle.fluid import Program, program_guard
 from paddle.fluid.op import Operator


+def fill_wrapper(shape, value=0.0):
+    out = paddle.full(shape=shape, fill_value=value)
+    return out
+
+
 # Situation 1: Attr(shape) is a list(without tensor)
 class TestFillConstantOp1(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.inputs = {}
         self.attrs = {'shape': [123, 92], 'value': 3.8}
@@ -42,6 +48,7 @@ class TestFillConstantOp2(OpTest):
     def setUp(self):
         '''Test fill_constant op with default value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.inputs = {}
         self.attrs = {'shape': [123, 92]}
@@ -55,6 +62,7 @@ class TestFillConstantOp3(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified int64 value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.inputs = {}
         self.attrs = {'shape': [123, 92], 'value': 10000000000}
@@ -68,6 +76,7 @@ class TestFillConstantOp4(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified int value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.inputs = {}
         self.attrs = {'shape': [123, 92], 'value': 3}
@@ -84,6 +93,7 @@ class TestFillConstantBF16Op(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.dtype = np.uint16
         self.inputs = {}
         self.attrs = {
@@ -130,6 +140,7 @@ class TestFillConstantOp1_ShapeTensorList(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
@@ -154,6 +165,7 @@ class TestFillConstantOp2_ShapeTensorList(OpTest):
     def setUp(self):
         '''Test fill_constant op with default value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
@@ -192,6 +204,7 @@ class TestFillConstantOp1_ShapeTensor(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
@@ -211,6 +224,7 @@ class TestFillConstantOp1_ValueTensor(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         self.inputs = {
@@ -234,6 +248,7 @@ class TestFillConstantOp2_ValueTensor(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         self.inputs = {
@@ -452,6 +467,7 @@ class TestFillConstantOp_ValueTensorBf16(OpTest):
     def setUp(self):
         '''Test fill_constant op with specified value'''
         self.op_type = "fill_constant"
+        self.python_api = fill_wrapper
         self.init_data()
         self.inputs = {
@@ -470,7 +486,8 @@ class TestFillConstantOp_ValueTensorBf16(OpTest):
         self.mkldnn_data_type = "bfloat16"

     def test_check_output(self):
-        self.check_output_with_place(core.CPUPlace())
+        # no dynamic graph test for mkldnn
+        self.check_output_with_place(core.CPUPlace(), check_dygraph=False)


 if __name__ == "__main__":
......
python/paddle/fluid/tests/unittests/test_log_loss_op.py
@@ -15,7 +15,9 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
+
+from paddle.nn import functional as F


 def sigmoid_array(x):
@@ -25,6 +27,7 @@ def sigmoid_array(x):
 class TestLogLossOp(OpTest):
     def setUp(self):
         self.op_type = 'log_loss'
+        self.python_api = F.log_loss
         samples_num = 100
         x = np.random.random((samples_num, 1)).astype("float32")
......
python/paddle/fluid/tests/unittests/test_logspace_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
@@ -23,6 +23,7 @@ import paddle
 class TestLogspaceOpCommonCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
+        self.python_api = paddle.logspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -41,6 +42,7 @@ class TestLogspaceOpCommonCase(OpTest):
 class TestLogspaceOpReverseCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
+        self.python_api = paddle.logspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -59,6 +61,7 @@ class TestLogspaceOpReverseCase(OpTest):
 class TestLogspaceOpNumOneCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
+        self.python_api = paddle.logspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -77,6 +80,7 @@ class TestLogspaceOpNumOneCase(OpTest):
 class TestLogspaceOpMinusBaseCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
+        self.python_api = paddle.logspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -95,6 +99,7 @@ class TestLogspaceOpMinusBaseCase(OpTest):
 class TestLogspaceOpZeroBaseCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
+        self.python_api = paddle.logspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
......
python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
@@ -57,10 +57,10 @@ class TestLookupTableOp(OpTest):
         return "int64"

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_eager=True)
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))


 class TestLookupTableOpInt16(OpTest):
@@ -81,6 +81,7 @@ class TestLookupTableOpUInt8(OpTest):
 class TestLookupTableOpWithTensorIds(OpTest):
     def setUp(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         table = np.random.random((17, 31)).astype("float64")
         ids = np.random.randint(low=0, high=17, size=(2, 4, 5)).astype("int32")
         self.inputs = {'W': table, 'Ids': ids}
......
python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -15,12 +15,12 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, get_numeric_gradient
+from eager_op_test import OpTest, convert_float_to_uint16, get_numeric_gradient
+from testsuite import create_op
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.testsuite import create_op


 def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
@@ -72,6 +72,7 @@ class TestMatMulV2Op(OpTest):
         self.init_kernel_type()
         self.config()
         self.op_type = "matmul_v2"
+        self.python_api = paddle.tensor.matmul
         if self.is_bfloat16_op():
             x = np.random.random(self.x_shape).astype(np.float32)
             y = np.random.random(self.y_shape).astype(np.float32)
@@ -102,15 +103,13 @@ class TestMatMulV2Op(OpTest):
         self.outputs = {'Out': result}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         if core.is_compiled_with_rocm():
-            self.check_grad(
-                ['X', 'Y'], 'Out', max_relative_error=1e-2, check_eager=False
-            )
+            self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-2)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestMatMulOp2(TestMatMulV2Op):
@@ -344,9 +343,7 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(
-                    place, atol=atol, check_eager=False
-                )
+                self.check_output_with_place(place, atol=atol)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
@@ -356,7 +353,6 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
                 ['X', 'Y'],
                 'Out',
                 max_relative_error=max_relative_error,
-                check_eager=False,
             )

     cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
@@ -562,6 +558,7 @@ class TestMatMulV2API(unittest.TestCase):
 class TestComplexMatMulOp(OpTest):
     def setUp(self):
         self.op_type = "matmul_v2"
+        self.python_api = paddle.tensor.matmul
         self.init_base_dtype()
         self.init_input_output()
         self.init_grad_input_output()
@@ -593,7 +590,7 @@ class TestComplexMatMulOp(OpTest):
         self.grad_y = np.matmul(np.conj(self.x).T, self.grad_out)

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -601,7 +598,6 @@ class TestComplexMatMulOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )

     def test_check_grad_ingore_x(self):
@@ -611,7 +607,6 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )

     def test_check_grad_ingore_y(self):
@@ -621,13 +616,13 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )


 class TestComplexMatMulOpBroadcast(OpTest):
     def setUp(self):
         self.op_type = "matmul_v2"
+        self.python_api = paddle.tensor.matmul
         self.init_base_dtype()
         self.init_input_output()
         self.init_grad_input_output()
@@ -661,7 +656,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
         )

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -669,7 +664,6 @@ class TestComplexMatMulOpBroadcast(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )

     def test_check_grad_ingore_x(self):
@@ -679,7 +673,6 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )

     def test_check_grad_ingore_y(self):
@@ -689,7 +682,6 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
-            check_eager=False,
         )
......
python/paddle/fluid/tests/unittests/test_matrix_power_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -32,6 +32,7 @@ import paddle.fluid as fluid
 class TestMatrixPowerOp(OpTest):
     def setUp(self):
         self.op_type = "matrix_power"
+        self.python_api = paddle.tensor.matrix_power
         self.config()

         np.random.seed(123)
......
python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
@@ -29,10 +29,14 @@ def l2_norm(x, axis, epsilon):
     return y, r


+def norm_wrapper(x, axis=1, epsilon=1e-12, is_test=False):
+    return paddle.nn.functional.normalize(x, axis=axis, epsilon=epsilon)
+
+
 class TestNormOp(OpTest):
     def setUp(self):
         self.op_type = "norm"
-        self.python_api = paddle.nn.functional.normalize
+        self.python_api = norm_wrapper
         self.init_test_case()
         self.init_dtype()
         x = np.random.random(self.shape).astype(self.dtype)
@@ -40,6 +44,7 @@ class TestNormOp(OpTest):
         self.inputs = {'X': x}
         self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
         self.outputs = {'Out': y, 'Norm': norm}
+        self.python_out_sig = ['Out']

     def test_check_output(self):
         self.check_output()
@@ -126,19 +131,22 @@ class TestNormOp7(TestNormOp):
 class TestNormTestOp(OpTest):
     def setUp(self):
         self.op_type = "norm"
+        self.python_api = norm_wrapper
         self.init_test_case()
         x = np.random.random(self.shape).astype("float64")
         y, norm = l2_norm(x, self.axis, self.epsilon)
         self.inputs = {'X': x}
         self.attrs = {
             'epsilon': self.epsilon,
-            'axis': self.axis,
+            'axis': int(self.axis),
             'is_test': True,
         }
         self.outputs = {'Out': y}
+        self.python_out_sig = ["out"]

     def test_check_output(self):
-        self.check_output()
+        # dynamic graph just supports float tensor
+        self.check_output(check_dygraph=True)

     def test_check_grad(self):
         pass
......
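Two details of the norm migration are easy to miss. First, the norm op emits two outputs, Out and the intermediate Norm, while the wrapped paddle.nn.functional.normalize returns only the normalized tensor, so python_out_sig narrows the dynamic-graph comparison to the output the Python API actually produces. Second, the is_test variant wraps the axis attribute in int(...), presumably so a NumPy integer does not leak into the attribute map. A sketch of the output mismatch that python_out_sig accounts for:

    import numpy as np
    import paddle

    t = paddle.to_tensor(np.random.random((2, 3)).astype("float64"))
    out = paddle.nn.functional.normalize(t, axis=1, epsilon=1e-12)
    print(out.shape)  # [2, 3] -- only 'Out'; the op's second output 'Norm'
                      # has no counterpart here, hence python_out_sig = ['Out']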
python/paddle/fluid/tests/unittests/test_numel_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -24,6 +24,7 @@ import paddle.fluid as fluid
 class TestNumelOp(OpTest):
     def setUp(self):
         self.op_type = "size"
+        self.python_api = paddle.numel
        self.init()
         x = np.random.random((self.shape)).astype("float64")
         self.inputs = {
......
python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -23,9 +23,15 @@ import paddle.fluid.core as core
 from paddle.fluid.framework import Program, program_guard


+def one_hot_wrapper(x, depth_tensor, **keargs):
+    return paddle.nn.functional.one_hot(x, depth_tensor)
+
+
 class TestOneHotOp(OpTest):
     def setUp(self):
         self.op_type = 'one_hot_v2'
+        self.python_api = one_hot_wrapper
+        self.python_out_sig = ['Out']
         depth = 10
         depth_np = np.array(10).astype('int32')
         dimension = 12
@@ -49,6 +55,7 @@ class TestOneHotOp(OpTest):
 class TestOneHotOp_attr(OpTest):
     def setUp(self):
         self.op_type = 'one_hot_v2'
+        self.python_api = one_hot_wrapper
         depth = 10
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
@@ -73,6 +80,7 @@ class TestOneHotOp_attr(OpTest):
 class TestOneHotOp_default_dtype(OpTest):
     def setUp(self):
         self.op_type = 'one_hot_v2'
+        self.python_api = one_hot_wrapper
         depth = 10
         depth_np = np.array(10).astype('int32')
         dimension = 12
@@ -96,6 +104,7 @@ class TestOneHotOp_default_dtype(OpTest):
 class TestOneHotOp_default_dtype_attr(OpTest):
     def setUp(self):
         self.op_type = 'one_hot_v2'
+        self.python_api = one_hot_wrapper
         depth = 10
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
......
python/paddle/fluid/tests/unittests/test_shard_index_op.py
@@ -15,11 +15,14 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
+
+import paddle


 def common_setup(self, index_num, nshards, shard_id, ignore_value):
     self.op_type = 'shard_index'
+    self.python_api = paddle.tensor.shard_index
     x_lod = [[i for i in range(10)]]
     N = sum(x_lod[0])
     x = [np.random.randint(0, index_num - 1) for i in range(N)]
......
python/paddle/fluid/tests/unittests/test_sign_op.py
@@ -28,6 +28,7 @@ from paddle.fluid import Program, program_guard
 class TestSignOp(OpTest):
     def setUp(self):
         self.op_type = "sign"
+        self.python_api = paddle.sign
         self.inputs = {
             'X': np.random.uniform(-10, 10, (10, 10)).astype("float64")
         }
......