Unverified commit 627eaa0f authored by 姜永久, committed by GitHub

rm unittests eager guard tests part9 histogram2imperative_dataloader (#48825)

* rm unittests eager guard tests part9 histogram2imperative_dataloader

* rm basic
Parent 30674b0e
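Every file in this diff follows the same mechanical refactor: each test body previously lived in a `func_*` helper, and a `test_*` wrapper ran that helper twice, once inside `paddle.fluid.framework._test_eager_guard()` and once without it. With eager mode now the default, the guard import, the wrapper, and the duplicate run are removed, and the helper becomes a plain `test_*` method. A minimal sketch of the before/after pattern (the class name below is illustrative; the inputs and expected counts follow the histogram test in the first hunk):

```python
import unittest

import numpy as np

import paddle


class TestHistogramEagerCleanup(unittest.TestCase):
    # Before this commit, the body lived in a func_* helper and a test_*
    # wrapper ran it twice, once under the (now removed) eager guard:
    #
    #   def func_histogram(self):
    #       ...  # test body
    #
    #   def test_histogram(self):
    #       with _test_eager_guard():
    #           self.func_histogram()
    #       self.func_histogram()

    # After this commit, there is a single plain test_* method and no guard.
    def test_histogram(self):
        inputs = paddle.to_tensor(np.array([[2, 4, 2], [2, 5, 4]], dtype=np.int64))
        actual = paddle.histogram(inputs, bins=5, min=1, max=5)
        expected = np.array([0, 3, 0, 2, 1], dtype=np.int64)
        self.assertTrue(
            (actual.numpy() == expected).all(),
            msg='histogram output is wrong, out =' + str(actual.numpy()),
        )


if __name__ == '__main__':
    unittest.main()
```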
@@ -20,7 +20,6 @@ from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestHistogramOpAPI(unittest.TestCase):
@@ -59,14 +58,12 @@ class TestHistogramOpAPI(unittest.TestCase):
msg='histogram output is wrong, out =' + str(actual.numpy()),
)
with _test_eager_guard():
inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64)
inputs = paddle.to_tensor(inputs_np)
actual = paddle.histogram(inputs, bins=5, min=1, max=5)
self.assertTrue(
(actual.numpy() == expected).all(),
msg='histogram output is wrong, out ='
+ str(actual.numpy()),
msg='histogram output is wrong, out =' + str(actual.numpy()),
)
......
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Embedding
from paddle.tensor import random
@@ -169,7 +168,7 @@ class MyLayer2(fluid.Layer):
class TestImperativeAutoPrune(unittest.TestCase):
def func_auto_prune(self):
def test_auto_prune(self):
with fluid.dygraph.guard():
case1 = AutoPruneLayer0(input_size=5)
value1 = np.arange(25).reshape(5, 5).astype("float32")
@@ -181,12 +180,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNotNone(case1.linear2.weight._grad_ivar())
self.assertIsNotNone(case1.linear1.weight._grad_ivar())
def test_auto_prune(self):
with _test_eager_guard():
self.func_auto_prune()
self.func_auto_prune()
def func_auto_prune2(self):
def test_auto_prune2(self):
with fluid.dygraph.guard():
case2 = AutoPruneLayer1(input_size=5)
value1 = np.arange(25).reshape(5, 5).astype("float32")
@@ -199,13 +193,9 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNone(case2.linear2.weight._grad_ivar())
self.assertIsNotNone(case2.linear1.weight._grad_ivar())
def test_auto_prune2(self):
with _test_eager_guard():
self.func_auto_prune2()
self.func_auto_prune2()
# TODO(jiabin): Support this when we support better split tensor
def func_auto_prune3(self):
def test_auto_prune3(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case3 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -216,15 +206,10 @@ class TestImperativeAutoPrune(unittest.TestCase):
loss.backward()
self.assertIsNotNone(case3.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all())
def test_auto_prune3(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune3()
self.func_auto_prune3()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def func_auto_prune4(self):
def test_auto_prune4(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case4 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -235,15 +220,10 @@ class TestImperativeAutoPrune(unittest.TestCase):
part2.backward()
self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 1).all())
def test_auto_prune4(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune4()
self.func_auto_prune4()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def func_auto_prune5(self):
def test_auto_prune5(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with fluid.dygraph.guard():
case4 = AutoPruneLayer3(input_size=784)
value1 = np.arange(784).reshape(1, 784).astype("float32")
@@ -254,15 +234,9 @@ class TestImperativeAutoPrune(unittest.TestCase):
part1.backward()
self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all())
def test_auto_prune5(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with _test_eager_guard():
self.func_auto_prune5()
self.func_auto_prune5()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
def func_auto_prune6(self):
def test_auto_prune6(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -280,12 +254,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())
def test_auto_prune6(self):
with _test_eager_guard():
self.func_auto_prune6()
self.func_auto_prune6()
def func_auto_prune7(self):
def test_auto_prune7(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -303,12 +272,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())
def test_auto_prune7(self):
with _test_eager_guard():
self.func_auto_prune7()
self.func_auto_prune7()
def func_auto_prune8(self):
def test_auto_prune8(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -336,12 +300,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
np.array_equal(linear_origin, linear.weight.numpy())
)
def test_auto_prune8(self):
with _test_eager_guard():
self.func_auto_prune8()
self.func_auto_prune8()
def func_auto_prune9(self):
def test_auto_prune9(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -371,12 +330,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
except ValueError as e:
assert type(e) == ValueError
def test_auto_prune9(self):
with _test_eager_guard():
self.func_auto_prune9()
self.func_auto_prune9()
def func_auto_prune10(self):
def test_auto_prune10(self):
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
@@ -396,12 +350,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNone(linear.weight.gradient())
self.assertIsNone(out1.gradient())
def test_auto_prune10(self):
with _test_eager_guard():
self.func_auto_prune10()
self.func_auto_prune10()
def func_auto_prune_with_optimizer(self):
def test_auto_prune_with_optimizer(self):
vocab_size = 100
size = 20
batch_size = 16
@@ -451,12 +400,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
assert model.embed1.weight._grad_ivar() is None
assert model.linear_1.weight._grad_ivar() is None
def test_auto_prune_with_optimizer(self):
with _test_eager_guard():
self.func_auto_prune_with_optimizer()
self.func_auto_prune_with_optimizer()
def func_case2_prune_no_grad_branch(self):
def test_case2_prune_no_grad_branch(self):
with fluid.dygraph.guard():
value1 = np.arange(784).reshape(1, 784)
value2 = np.arange(1).reshape(1, 1)
@@ -468,12 +412,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertIsNone(case3.linear2.weight._grad_ivar())
self.assertIsNotNone(case3.linear.weight._grad_ivar())
def test_case2_prune_no_grad_branch(self):
with _test_eager_guard():
self.func_case2_prune_no_grad_branch()
self.func_case2_prune_no_grad_branch()
def func_case3_prune_no_grad_branch2(self):
def test_case3_prune_no_grad_branch2(self):
with fluid.dygraph.guard():
value1 = np.arange(1).reshape(1, 1)
linear = paddle.nn.Linear(1, 1)
@@ -486,23 +425,13 @@ class TestImperativeAutoPrune(unittest.TestCase):
loss.backward()
self.assertIsNone(linear.weight._grad_ivar())
def test_case3_prune_no_grad_branch2(self):
with _test_eager_guard():
self.func_case3_prune_no_grad_branch2()
self.func_case3_prune_no_grad_branch2()
def func_case4_with_no_grad_op_maker(self):
def test_case4_with_no_grad_op_maker(self):
with fluid.dygraph.guard():
out = random.gaussian(shape=[20, 30])
loss = paddle.mean(out)
loss.backward()
self.assertIsNone(out._grad_ivar())
def test_case4_with_no_grad_op_maker(self):
with _test_eager_guard():
self.func_case4_with_no_grad_op_maker()
self.func_case4_with_no_grad_op_maker()
if __name__ == '__main__':
unittest.main()
@@ -16,11 +16,10 @@ import unittest
from collections import OrderedDict
import paddle
from paddle.fluid.framework import _test_eager_guard
class TestLayerDict(unittest.TestCase):
def func_layer_dict(self):
def test_layer_dict(self):
layers = OrderedDict(
[
('conv1d', paddle.nn.Conv1D(3, 2, 3)),
@@ -92,12 +91,7 @@ class TestLayerDict(unittest.TestCase):
layers_dicts.update(list_format_layers)
check_layer_dict()
def test_layer_dict(self):
with _test_eager_guard():
self.func_layer_dict()
self.func_layer_dict()
def func_layer_dict_error_inputs(self):
def test_layer_dict_error_inputs(self):
layers = [
('conv1d', paddle.nn.Conv1D(3, 2, 3), "conv1d"),
('conv2d', paddle.nn.Conv2D(3, 2, 3)),
@@ -108,11 +102,6 @@ class TestLayerDict(unittest.TestCase):
self.assertRaises(AssertionError, layers_dicts.update, 1)
def test_layer_dict_error_inputs(self):
with _test_eager_guard():
self.func_layer_dict_error_inputs()
self.func_layer_dict_error_inputs()
if __name__ == '__main__':
unittest.main()
@@ -18,7 +18,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
class MyLayer(fluid.Layer):
@@ -94,15 +93,10 @@ class TestImperativeContainer(unittest.TestCase):
self.assertListEqual(res11.shape, [5, 4])
res11.backward()
def func_test_layer_list(self):
def test_test_layer_list(self):
self.layer_list(True)
self.layer_list(False)
def test_layer_list(self):
with _test_eager_guard():
self.func_test_layer_list()
self.func_test_layer_list()
if __name__ == '__main__':
unittest.main()
@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import _legacy_C_ops
from paddle.fluid.framework import _test_eager_guard
class MyLayer(fluid.Layer):
@@ -68,9 +67,6 @@ class TestImperativeContainerParameterList(unittest.TestCase):
loss.backward()
def test_paramter_list(self):
with _test_eager_guard():
self.paramter_list(False)
self.paramter_list(True)
self.paramter_list(False)
self.paramter_list(True)
......
@@ -18,12 +18,11 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Linear
class TestImperativeContainerSequential(unittest.TestCase):
def func_sequential(self):
def test_sequential(self):
data = np.random.uniform(-1, 1, [5, 10]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
@@ -57,12 +56,7 @@ class TestImperativeContainerSequential(unittest.TestCase):
loss2 = paddle.mean(res2)
loss2.backward()
def test_sequential(self):
with _test_eager_guard():
self.func_sequential()
self.func_sequential()
def func_sequential_list_params(self):
def test_sequential_list_params(self):
data = np.random.uniform(-1, 1, [5, 10]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
@@ -96,11 +90,6 @@ class TestImperativeContainerSequential(unittest.TestCase):
loss2 = paddle.mean(res2)
loss2.backward()
def test_sequential_list_params(self):
with _test_eager_guard():
self.func_sequential_list_params()
self.func_sequential_list_params()
if __name__ == '__main__':
unittest.main()
@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.reader import use_pinned_memory
@@ -51,7 +50,7 @@ class TestDygraphDataLoader(unittest.TestCase):
self.assertEqual(label.shape, [self.batch_size, 1])
self.assertEqual(relu.shape, [self.batch_size, 784])
def func_test_single_process_loader(self):
def test_single_process_loader(self):
with fluid.dygraph.guard():
loader = fluid.io.DataLoader.from_generator(
capacity=self.capacity, iterable=False, use_multiprocess=False
......@@ -63,12 +62,7 @@ class TestDygraphDataLoader(unittest.TestCase):
)
self.iter_loader_data(loader)
def test_single_process_loader(self):
with _test_eager_guard():
self.func_test_single_process_loader()
self.func_test_single_process_loader()
def func_test_multi_process_loader(self):
def test_multi_process_loader(self):
with fluid.dygraph.guard():
loader = fluid.io.DataLoader.from_generator(
capacity=self.capacity, use_multiprocess=True
......@@ -80,12 +74,7 @@ class TestDygraphDataLoader(unittest.TestCase):
)
self.iter_loader_data(loader)
def test_multi_process_loader(self):
with _test_eager_guard():
self.func_test_multi_process_loader()
self.func_test_multi_process_loader()
def func_test_generator_no_places(self):
def test_generator_no_places(self):
with fluid.dygraph.guard():
loader = fluid.io.DataLoader.from_generator(capacity=self.capacity)
loader.set_sample_generator(
......@@ -94,12 +83,7 @@ class TestDygraphDataLoader(unittest.TestCase):
)
self.iter_loader_data(loader)
def test_generator_no_places(self):
with _test_eager_guard():
self.func_test_generator_no_places()
self.func_test_generator_no_places()
def func_test_set_pin_memory(self):
def test_set_pin_memory(self):
with fluid.dygraph.guard():
use_pinned_memory(False)
loader = fluid.io.DataLoader.from_generator(
......@@ -113,11 +97,6 @@ class TestDygraphDataLoader(unittest.TestCase):
self.iter_loader_data(loader)
use_pinned_memory(True)
def test_set_pin_memory(self):
with _test_eager_guard():
self.func_test_set_pin_memory()
self.func_test_set_pin_memory()
if __name__ == '__main__':
unittest.main()
@@ -19,7 +19,6 @@ import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
def get_random_images_and_labels(image_shape, label_shape):
@@ -35,19 +34,14 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
self.epoch_num = 1
self.capacity = 5
def func_test_not_capacity(self):
def test_not_capacity(self):
with fluid.dygraph.guard():
with self.assertRaisesRegexp(
ValueError, "Please give value to capacity."
):
fluid.io.DataLoader.from_generator()
def test_not_capacity(self):
with _test_eager_guard():
self.func_test_not_capacity()
self.func_test_not_capacity()
def func_test_single_process_with_thread_expection(self):
def test_single_process_with_thread_expection(self):
def error_sample_genarator(batch_num):
def __reader__():
for _ in range(batch_num):
@@ -71,12 +65,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_single_process_with_thread_expection(self):
with _test_eager_guard():
self.func_test_single_process_with_thread_expection()
self.func_test_single_process_with_thread_expection()
def func_test_multi_process_with_process_expection(self):
def test_multi_process_with_process_expection(self):
def error_sample_genarator(batch_num):
def __reader__():
for _ in range(batch_num):
@@ -99,12 +88,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_multi_process_with_process_expection(self):
with _test_eager_guard():
self.func_test_multi_process_with_process_expection()
self.func_test_multi_process_with_process_expection()
def func_test_multi_process_with_get_timeout(self):
def test_multi_process_with_get_timeout(self):
def slow_batch_generator_creator(batch_size, batch_num):
def __reader__():
for _ in range(batch_num):
@@ -134,11 +118,6 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_multi_process_with_get_timeout(self):
with _test_eager_guard():
self.func_test_multi_process_with_get_timeout()
self.func_test_multi_process_with_get_timeout()
if __name__ == '__main__':
unittest.main()
@@ -18,7 +18,6 @@ import signal
import time
import unittest
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.reader import (
CleanupFuncRegistrar,
_cleanup,
@@ -33,25 +32,20 @@ class TestDygraphDataLoaderCleanUpFunc(unittest.TestCase):
def setUp(self):
self.capacity = 10
def func_test_clear_queue_set(self):
def test_clear_queue_set(self):
test_queue = queue.Queue(self.capacity)
multiprocess_queue_set.add(test_queue)
for i in range(0, self.capacity):
test_queue.put(i)
_cleanup()
def test_clear_queue_set(self):
with _test_eager_guard():
self.func_test_clear_queue_set()
self.func_test_clear_queue_set()
class TestRegisterExitFunc(unittest.TestCase):
# This function does not need to be implemented in this case
def none_func(self):
pass
def func_test_not_callable_func(self):
def test_not_callable_func(self):
exception = None
try:
CleanupFuncRegistrar.register(5)
@@ -60,22 +54,12 @@ class TestRegisterExitFunc(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_not_callable_func(self):
with _test_eager_guard():
self.func_test_not_callable_func()
self.func_test_not_callable_func()
def func_test_old_handler_for_sigint(self):
def test_old_handler_for_sigint(self):
CleanupFuncRegistrar.register(
function=self.none_func, signals=[signal.SIGINT]
)
def test_old_handler_for_sigint(self):
with _test_eager_guard():
self.func_test_old_handler_for_sigint()
self.func_test_old_handler_for_sigint()
def func_test_signal_wrapper_by_sigchld(self):
def test_signal_wrapper_by_sigchld(self):
# This function does not need to be implemented in this case
def __test_process__():
pass
@@ -93,11 +77,6 @@ class TestRegisterExitFunc(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_signal_wrapper_by_sigchld(self):
with _test_eager_guard():
self.func_test_signal_wrapper_by_sigchld()
self.func_test_signal_wrapper_by_sigchld()
if __name__ == '__main__':
unittest.main()
@@ -17,7 +17,6 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.io import DataLoader, Dataset
@@ -79,29 +78,19 @@ class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase):
if step_id == 30:
break
def func_test_data_loader_break(self):
def test_data_loader_break(self):
with fluid.dygraph.guard():
loader = self.prepare_data_loader()
for _ in range(self.epoch_num):
self.run_one_epoch_with_break(loader)
break
def test_data_loader_break(self):
with _test_eager_guard():
self.func_test_data_loader_break()
self.func_test_data_loader_break()
def func_test_data_loader_continue_break(self):
def test_data_loader_continue_break(self):
with fluid.dygraph.guard():
loader = self.prepare_data_loader()
for _ in range(self.epoch_num):
self.run_one_epoch_with_break(loader)
def test_data_loader_continue_break(self):
with _test_eager_guard():
self.func_test_data_loader_continue_break()
self.func_test_data_loader_continue_break()
class TestMultiProcessDataLoaderMmapFdsClear(TestDygraphDataLoaderMmapFdsClear):
def prepare_data_loader(self):
......
@@ -19,7 +19,6 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.reader import _reader_process_loop
@@ -49,7 +48,7 @@ class TestDygraphDataLoaderProcess(unittest.TestCase):
self.epoch_num = 2
self.capacity = 2
def func_test_reader_process_loop(self):
def test_reader_process_loop(self):
# This unittest's memory mapped files needs to be cleaned manually
def __clear_process__(util_queue):
while True:
......@@ -80,12 +79,7 @@ class TestDygraphDataLoaderProcess(unittest.TestCase):
)
clear_process.start()
def test_reader_process_loop(self):
with _test_eager_guard():
self.func_test_reader_process_loop()
self.func_test_reader_process_loop()
def func_test_reader_process_loop_simple_none(self):
def test_reader_process_loop_simple_none(self):
def none_sample_genarator(batch_num):
def __reader__():
for _ in range(batch_num):
@@ -108,11 +102,6 @@ class TestDygraphDataLoaderProcess(unittest.TestCase):
exception = ex
self.assertIsNotNone(exception)
def test_reader_process_loop_simple_none(self):
with _test_eager_guard():
self.func_test_reader_process_loop_simple_none()
self.func_test_reader_process_loop_simple_none()
if __name__ == '__main__':
unittest.main()