Unverified commit e6ca78c2, authored by wanghuancoder, committed by GitHub

Del old dygraph optest2 (#51458)

* delete old dygraph op test
Parent 48090c72
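The hunks below all apply one recurring migration pattern to the legacy op tests: import `OpTest` (and, where static-graph APIs are exercised, `paddle_static_guard`) from `eager_op_test` instead of `op_test`; drop the `check_eager` argument from `check_output`/`check_grad` and the bookkeeping around it; add `check_dygraph=False` where the op is not checked in dygraph mode (the AUC tests here); and wrap static-graph-only API tests in `paddle_static_guard()`. A minimal sketch of the pattern follows; the operator name `my_op` and the test classes are illustrative placeholders, not code from this commit.

```python
import unittest

import numpy as np
from eager_op_test import OpTest, paddle_static_guard  # was: from op_test import OpTest

import paddle


class TestMyOp(OpTest):
    def setUp(self):
        # "my_op" is a placeholder operator name used only for illustration.
        self.op_type = "my_op"
        x = np.random.random((3, 4)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': 2.0 * x}

    def test_check_output(self):
        # was: self.check_output(check_eager=True)
        self.check_output()

    def test_check_grad(self):
        # was: self.check_grad(['X'], 'Out', check_eager=True)
        self.check_grad(['X'], 'Out')


class TestMyOpStaticAPI(unittest.TestCase):
    def test_static_api(self):
        # Static-graph-only API code is wrapped in paddle_static_guard(),
        # as done for the bilateral_slice and bmm API tests in this commit.
        with paddle_static_guard():
            x = paddle.static.data('x', shape=[-1, 3, 4], dtype='float64')
            # ... build and run the static program here ...


if __name__ == "__main__":
    unittest.main()
```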
@@ -437,6 +437,7 @@ class TestMKLDNNExpOp(TestActivation):
 # Check if primitives already exist in backward
 class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
     def setUp(self):
+        paddle.enable_static()
         super().setUp()
         np.random.seed(123)
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid.core as core
@@ -44,10 +44,10 @@ class TestAtan2(OpTest):
         self.outputs = {'Out': out}
     def test_check_grad(self):
-        self.check_grad(['X1', 'X2'], 'Out', check_eager=True)
+        self.check_grad(['X1', 'X2'], 'Out')
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def init_dtype(self):
         self.dtype = np.float64
@@ -67,7 +67,6 @@ class TestAtan2_float(TestAtan2):
                     self.inputs['X2'],
                     1 / self.inputs['X1'].size,
                 ),
-                check_eager=True,
             )
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_fusion_lstm_op import ACTIVATION, fc
 from test_softmax_op import stable_softmax
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -65,7 +65,7 @@ class TestAucOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 class TestGlobalAucOp(OpTest):
@@ -105,7 +105,7 @@ class TestGlobalAucOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 class TestAucAPI(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.fluid import metrics
@@ -66,7 +66,7 @@ class TestAucSinglePredOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 class TestAucGlobalSinglePredOp(OpTest):
@@ -109,7 +109,7 @@ class TestAucGlobalSinglePredOp(OpTest):
         }
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle.fluid.core as core
...
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from op_test import OpTest, _set_use_system_allocator
+from eager_op_test import OpTest, _set_use_system_allocator
 import paddle
 import paddle.fluid as fluid
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -135,8 +135,6 @@ class TestBicubicInterpOp(OpTest):
         self.init_test_case()
         self.op_type = "bicubic_interp"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
         if self.data_layout == "NCHW":
@@ -165,10 +163,8 @@ class TestBicubicInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
         self.attrs = {
             'out_h': self.out_h,
@@ -181,12 +177,10 @@ class TestBicubicInterpOp(OpTest):
         self.outputs = {'Out': output_np}
     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True)
     def init_test_case(self):
         self.interp_method = 'bicubic'
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -186,8 +186,6 @@ class TestBicubicInterpOp(OpTest):
         self.init_test_case()
         self.op_type = "bicubic_interp_v2"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
         scale_h = 0
         scale_w = 0
@@ -227,10 +225,8 @@ class TestBicubicInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
         self.attrs = {
             'out_h': self.out_h,
@@ -249,12 +245,10 @@ class TestBicubicInterpOp(OpTest):
         self.outputs = {'Out': output_np}
     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True)
     def init_test_case(self):
         self.interp_method = 'bicubic'
...
@@ -16,7 +16,7 @@ import math
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
@@ -192,26 +192,27 @@ class TestBilateralSliceOp1(TestBilateralSliceOp):
 class TestBilateralSliceApi(unittest.TestCase):
     def test_api(self):
-        x = paddle.fluid.data(
-            name='x', shape=[None, 3, 25, 15], dtype='float32'
-        )
-        guide = paddle.fluid.data(
-            name='guide', shape=[None, 25, 15], dtype='float32'
-        )
-        grid = paddle.fluid.data(
-            name='grid', shape=[None, None, 8, 5, 3], dtype='float32'
-        )
-        paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False)
-
-        if not paddle.fluid.is_compiled_with_cuda():
-            return
-
-        with paddle.fluid.dygraph.guard():
-            x1 = paddle.rand([3, 1, 50, 30])
-            guide1 = paddle.rand([3, 50, 30])
-            grid1 = paddle.rand([3, 2, 2, 5, 3])
-
-            paddle.fluid.contrib.bilateral_slice(x1, guide1, grid1, False)
+        with paddle_static_guard():
+            x = paddle.fluid.data(
+                name='x', shape=[None, 3, 25, 15], dtype='float32'
+            )
+            guide = paddle.fluid.data(
+                name='guide', shape=[None, 25, 15], dtype='float32'
+            )
+            grid = paddle.fluid.data(
+                name='grid', shape=[None, None, 8, 5, 3], dtype='float32'
+            )
+            paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False)
+
+            if not paddle.fluid.is_compiled_with_cuda():
+                return
+
+            with paddle.fluid.dygraph.guard():
+                x1 = paddle.rand([3, 1, 50, 30])
+                guide1 = paddle.rand([3, 50, 30])
+                grid1 = paddle.rand([3, 2, 2, 5, 3])
+
+                paddle.fluid.contrib.bilateral_slice(x1, guide1, grid1, False)
 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -219,10 +219,10 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)
     def init_test_case(self):
         self.interp_method = 'bilinear'
@@ -409,9 +409,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}
     def test_check_output(self):
-        self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=True
-        )
+        self.check_output_with_place(place=core.CPUPlace(), atol=1)
     def init_test_case(self):
         self.interp_method = 'bilinear'
@@ -585,10 +583,10 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)
     def init_test_case(self):
         self.interp_method = 'bilinear'
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 import paddle.fluid as fluid
@@ -23,28 +23,29 @@ import paddle.fluid as fluid
 class TestDygraphBilinearTensorProductAPIError(unittest.TestCase):
     def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            layer = paddle.nn.Bilinear(5, 4, 1000)
-            # the input must be Variable.
-            x0 = fluid.create_lod_tensor(
-                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
-            )
-            self.assertRaises(TypeError, layer, x0)
-            # the input dtype must be float32 or float64
-            x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16")
-            x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32")
-            self.assertRaises(TypeError, layer, x1, x2)
-            # the dimensions of x and y must be 2
-            paddle.enable_static()
-            x3 = paddle.static.data("", shape=[0], dtype="float32")
-            x4 = paddle.static.data("", shape=[0], dtype="float32")
-            self.assertRaises(
-                ValueError,
-                paddle.static.nn.bilinear_tensor_product,
-                x3,
-                x4,
-                1000,
-            )
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                layer = paddle.nn.Bilinear(5, 4, 1000)
+                # the input must be Variable.
+                x0 = fluid.create_lod_tensor(
+                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
+                )
+                self.assertRaises(TypeError, layer, x0)
+                # the input dtype must be float32 or float64
+                x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16")
+                x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32")
+                self.assertRaises(TypeError, layer, x1, x2)
+                # the dimensions of x and y must be 2
+                paddle.enable_static()
+                x3 = paddle.static.data("", shape=[0], dtype="float32")
+                x4 = paddle.static.data("", shape=[0], dtype="float32")
+                self.assertRaises(
+                    ValueError,
+                    paddle.static.nn.bilinear_tensor_product,
+                    x3,
+                    x4,
+                    1000,
+                )
 class TestBilinearTensorProductOp(OpTest):
@@ -73,10 +74,10 @@ class TestBilinearTensorProductOp(OpTest):
         self.outputs = {'Out': output + bias}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out')
 if __name__ == "__main__":
...
@@ -17,7 +17,7 @@ import tempfile
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -150,7 +150,7 @@ class TestBincountOp(OpTest):
         self.Out = np.bincount(self.np_input, minlength=self.minlength)
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()
 class TestCase1(TestBincountOp):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 def bipartite_match(distance, match_indices, match_dist):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 import paddle.fluid as fluid
@@ -32,31 +32,33 @@ class TestBmmOp(OpTest):
         self.outputs = {'Out': Out}
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()
     def test_checkout_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')
 class API_TestBmm(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = paddle.static.data(
-                'data1', shape=[-1, 3, 4], dtype='float64'
-            )
-            data2 = paddle.static.data(
-                'data2', shape=[-1, 4, 5], dtype='float64'
-            )
-            result_bmm = paddle.bmm(data1, data2)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input1 = np.random.random([10, 3, 4]).astype('float64')
-            input2 = np.random.random([10, 4, 5]).astype('float64')
-            (result,) = exe.run(
-                feed={"data1": input1, "data2": input2}, fetch_list=[result_bmm]
-            )
-            expected_result = np.matmul(input1, input2)
-            np.testing.assert_allclose(expected_result, result, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data1 = paddle.static.data(
+                    'data1', shape=[-1, 3, 4], dtype='float64'
+                )
+                data2 = paddle.static.data(
+                    'data2', shape=[-1, 4, 5], dtype='float64'
+                )
+                result_bmm = paddle.bmm(data1, data2)
+                place = fluid.CPUPlace()
+                exe = fluid.Executor(place)
+                input1 = np.random.random([10, 3, 4]).astype('float64')
+                input2 = np.random.random([10, 4, 5]).astype('float64')
+                (result,) = exe.run(
+                    feed={"data1": input1, "data2": input2},
+                    fetch_list=[result_bmm],
+                )
+                expected_result = np.matmul(input1, input2)
+                np.testing.assert_allclose(expected_result, result, rtol=1e-05)
 class API_TestDygraphBmm(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 def box_clip(input_box, im_info, output_box):
...