Unverified commit d0fefa23, authored by 姜永久, committed by GitHub

rm unittests eager guard tests part11 imperative_layer2ocr (#48828)

* rm unittests eager guard tests part11 imperative_layer2ocr

* review
Parent: 1f93de31
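Every file in this diff applies the same mechanical change: the `_test_eager_guard` import and the code paths that re-ran each case under the guard are removed, so each test runs once in eager mode (now the default). In most files this means renaming the `func_*` helper to the `test_*` method and deleting the duplicate-run wrapper. A minimal sketch of the pattern, using a hypothetical `TestExample` case rather than any file touched by this commit:

import unittest

import paddle


class TestExample(unittest.TestCase):
    # New form after this kind of cleanup: the body is the test itself
    # and runs a single time, in eager mode.
    def test_check_add(self):
        x = paddle.to_tensor([1.0, 2.0])
        y = paddle.to_tensor([3.0, 4.0])
        # Hypothetical assertion, only to make the sketch self-contained.
        self.assertEqual((x + y).numpy().tolist(), [4.0, 6.0])

    # Old form removed by this commit (shown as a comment for contrast):
    #
    # def func_check_add(self):
    #     ...  # same body as above
    #
    # def test_check_add(self):
    #     with _test_eager_guard():
    #         self.func_check_add()
    #     self.func_check_add()


if __name__ == '__main__':
    unittest.main()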
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard

 class LeNetDygraph(fluid.dygraph.Layer):
@@ -74,7 +73,7 @@ def init_weights(layer):
 class TestLayerApply(unittest.TestCase):
-    def func_apply_init_weight(self):
+    def test_apply_init_weight(self):
         with fluid.dygraph.guard():
             net = LeNetDygraph()
@@ -88,11 +87,6 @@ class TestLayerApply(unittest.TestCase):
             np.testing.assert_allclose(layer.weight.numpy(), 0.7)
             np.testing.assert_allclose(layer.bias.numpy(), -0.2)

-    def test_apply_init_weight(self):
-        with _test_eager_guard():
-            self.func_apply_init_weight()
-        self.func_apply_init_weight()

 if __name__ == '__main__':
     unittest.main()
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard

 class LeNetDygraph(fluid.dygraph.Layer):
@@ -56,9 +55,8 @@ class TestLayerChildren(unittest.TestCase):
         return y1, y2

     def test_func_apply_init_weight(self):
-        with _test_eager_guard():
-            paddle.seed(102)
-            self.new_y1, self.new_y2 = self.func_apply_init_weight()
+        paddle.seed(102)
+        self.new_y1, self.new_y2 = self.func_apply_init_weight()
         paddle.seed(102)
         self.ori_y1, self.ori_y2 = self.func_apply_init_weight()
...
@@ -19,11 +19,10 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dygraph
-from paddle.fluid.framework import _test_eager_guard

 class TestImperativeLayerTrainable(unittest.TestCase):
-    def func_set_trainable(self):
+    def test_set_trainable(self):
         with fluid.dygraph.guard():
             label = np.random.uniform(-1, 1, [10, 10]).astype(np.float32)
@@ -45,11 +44,6 @@ class TestImperativeLayerTrainable(unittest.TestCase):
             with self.assertRaises(ValueError):
                 linear.weight.trainable = "1"

-    def test_set_trainable(self):
-        with _test_eager_guard():
-            self.func_set_trainable()
-        self.func_set_trainable()

 if __name__ == '__main__':
     unittest.main()
@@ -15,11 +15,10 @@
 import unittest
 import paddle.nn as nn
-from paddle.fluid.framework import _test_eager_guard

 class TestLayerPrint(unittest.TestCase):
-    def func_test_layer_str(self):
+    def test_layer_str(self):
         module = nn.ELU(0.2)
         self.assertEqual(str(module), 'ELU(alpha=0.2)')
@@ -385,11 +384,6 @@ class TestLayerPrint(unittest.TestCase):
             '(6): GELU(approximate=True)\n)',
         )

-    def test_layer_str(self):
-        with _test_eager_guard():
-            self.func_test_layer_str()
-        self.func_test_layer_str()

 if __name__ == '__main__':
     unittest.main()
@@ -22,7 +22,6 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
@@ -80,7 +79,7 @@ class SimpleNet(fluid.Layer):
 class TestDygraphSimpleNet(unittest.TestCase):
-    def func_simple_net(self):
+    def test_simple_net(self):
         for is_sparse in [True, False]:
             dtype_list = ["float32"]
             if not core.is_compiled_with_rocm():
@@ -88,11 +87,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for dtype in dtype_list:
                 self.simple_net_float32(is_sparse, dtype)

-    def test_simple_net(self):
-        with _test_eager_guard():
-            self.func_simple_net()
-        self.func_simple_net()

     def simple_net_float32(self, is_sparse, dtype):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
...
@@ -21,7 +21,6 @@ from utils import DyGraphProgramDescTracerTestHelper
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.nn import Linear
@@ -113,7 +112,7 @@ class TestImperativeMnist(unittest.TestCase):
         return _reader_imple

-    def func_test_mnist_float32(self):
+    def test_mnist_float32(self):
         seed = 90
         epoch_num = 1
         batch_size = 128
@@ -269,11 +268,6 @@ class TestImperativeMnist(unittest.TestCase):
                 value, dy_param_value[key], rtol=1e-05, atol=1e-05
             )

-    def test_mnist_float32(self):
-        with _test_eager_guard():
-            self.func_test_mnist_float32()
-        self.func_test_mnist_float32()

 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -22,12 +22,11 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import SGDOptimizer

 class TestImperativeMnistSortGradient(unittest.TestCase):
-    def func_test_mnist_sort_gradient_float32(self):
+    def test_mnist_sort_gradient_float32(self):
         seed = 90
         epoch_num = 1
@@ -168,11 +167,6 @@ class TestImperativeMnistSortGradient(unittest.TestCase):
                 value, dy_param_value2[key], rtol=1e-05, atol=1e-05
             )

-    def test_mnist_sort_gradient_float32(self):
-        with _test_eager_guard():
-            self.func_test_mnist_sort_gradient_float32()
-        self.func_test_mnist_sort_gradient_float32()

 if __name__ == '__main__':
     unittest.main()
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard

 class MyLayer(fluid.Layer):
@@ -34,7 +33,7 @@ class MyLayer(fluid.Layer):
 class TestImperativeNamedSubLayers(unittest.TestCase):
-    def func_test_named_sublayers(self):
+    def test_named_sublayers(self):
         with fluid.dygraph.guard():
             fc1 = paddle.nn.Linear(10, 3)
             fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
@@ -62,14 +61,9 @@ class TestImperativeNamedSubLayers(unittest.TestCase):
                 [model] + expected_sublayers,
             )

-    def test_named_sublayers(self):
-        with _test_eager_guard():
-            self.func_test_named_sublayers()
-        self.func_test_named_sublayers()

 class TestImperativeNamedParameters(unittest.TestCase):
-    def func_test_named_parameters(self):
+    def test_named_parameters(self):
         with fluid.dygraph.guard():
             fc1 = paddle.nn.Linear(10, 3)
             fc2 = paddle.nn.Linear(3, 10, bias_attr=False)
@@ -87,12 +81,7 @@ class TestImperativeNamedParameters(unittest.TestCase):
         self.assertListEqual(expected_named_parameters, named_parameters)

-    def test_named_parameters(self):
-        with _test_eager_guard():
-            self.func_test_named_parameters()
-        self.func_test_named_parameters()
-
-    def func_test_dir_layer(self):
+    def test_dir_layer(self):
         with fluid.dygraph.guard():

             class Mymodel(fluid.dygraph.Layer):
@@ -140,11 +129,6 @@ class TestImperativeNamedParameters(unittest.TestCase):
                 "model should contain parameter: weight",
             )

-    def test_dir_layer(self):
-        with _test_eager_guard():
-            self.func_test_dir_layer()
-        self.func_test_dir_layer()

 if __name__ == '__main__':
     unittest.main()
@@ -18,11 +18,11 @@ import warnings
 import numpy as np
 import paddle.fluid as fluid
-from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.fluid.framework import _in_legacy_dygraph

 class TestImperativeNumpyBridge(unittest.TestCase):
-    def func_tensor_from_numpy(self):
+    def test_tensor_from_numpy(self):
         data_np = np.array([[2, 3, 1]]).astype('float32')
         with fluid.dygraph.guard(fluid.CPUPlace()):
             with warnings.catch_warnings(record=True) as w:
@@ -52,11 +52,6 @@ class TestImperativeNumpyBridge(unittest.TestCase):
             self.assertNotEqual(var2[0][0].numpy()[0], -1)
             self.assertFalse(np.array_equal(var2.numpy(), data_np))

-    def test_func_tensor_from_numpy(self):
-        with _test_eager_guard():
-            self.func_tensor_from_numpy()
-        self.func_tensor_from_numpy()

 if __name__ == '__main__':
     unittest.main()
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import BatchNorm, Linear
@@ -513,12 +512,11 @@ class TestDygraphOCRAttention(unittest.TestCase):
         dy_out, dy_param_init_value, dy_param_value = run_dygraph()

         with fluid.dygraph.guard():
-            with _test_eager_guard():
-                (
-                    eager_out,
-                    eager_param_init_value,
-                    eager_param_value,
-                ) = run_dygraph()
+            (
+                eager_out,
+                eager_param_init_value,
+                eager_param_value,
+            ) = run_dygraph()

         with new_program_scope():
             paddle.seed(seed)
...