From c0cf5cb735261d4bd48a951758dd78a745a3b9ca Mon Sep 17 00:00:00 2001
From: Ming-Xu Huang
Date: Tue, 28 Jun 2022 16:40:10 +0800
Subject: [PATCH] Apply IOU to test_parallel_executor_seresnext_base_gpu
 (#43812)

1. `test_parallel_executor_seresnext_base_gpu` failed on 2 P100 GPUs with the
   `470.82` driver.

```
======================================================================
FAIL: test_seresnext_with_learning_rate_decay (test_parallel_executor_seresnext_base_gpu.TestResnetGPU)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/opt/paddle/paddle/build/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py", line 32, in test_seresnext_with_learning_rate_decay
    self._compare_result_with_origin_model(
  File "/opt/paddle/paddle/build/python/paddle/fluid/tests/unittests/seresnext_test_base.py", line 56, in _compare_result_with_origin_model
    self.assertAlmostEquals(
AssertionError: 6.8825445 != 6.882531 within 1e-05 delta (1.335144e-05 difference)
----------------------------------------------------------------------
```

2. To evaluate loss convergence more accurately, we propose applying IOU,
   computed from the area below each run's loss curve, as the metric,
   instead of comparing only the first and last loss values.
3. As discussed offline, we also evaluated convergence on P100 and A100 over
   1000 iterations to make sure this UT has the same convergence property on
   both devices. The curves are shown below.

![A100-Single, P100-Single and Diff (1)](https://user-images.githubusercontent.com/13541238/175461920-25df6101-6dd8-4387-862c-d1c8e9299c57.png)
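For reference, a minimal standalone sketch of the new check (the helper name
and the sample loss values below are illustrative, not part of this patch):
each run's per-iteration mean losses are reduced to a trapezoidal area, with
the first and last points weighted by 0.5 as in `check_network_convergence`,
and the two areas are compared with a relative tolerance, as
`seresnext_test_base.py` now does via `np.testing.assert_allclose`.

```python
import numpy as np


def area_below_loss_curve(losses):
    # Trapezoidal area under a per-iteration loss curve with unit step:
    # first and last points get weight 0.5, interior points weight 1.0,
    # mirroring the accumulation added to check_network_convergence.
    losses = np.asarray(losses, dtype=np.float64)
    return 0.5 * losses[0] + losses[1:-1].sum() + 0.5 * losses[-1]


# Hypothetical per-iteration mean losses from two runs of the same model.
run_a = [6.93, 6.91, 6.90, 6.89, 6.88]
run_b = [6.93, 6.91, 6.90, 6.89, 6.88]

# Areas are compared with a relative tolerance (rtol=delta2 in the test).
np.testing.assert_allclose(area_below_loss_curve(run_a),
                           area_below_loss_curve(run_b),
                           rtol=1e-5)
```

Aggregating the whole curve makes the comparison robust to per-step numerical
jitter such as the 1.335144e-05 first/last-loss difference in the failure
above.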
---
 .../unittests/parallel_executor_test_base.py       | 17 ++++++++++++-----
 .../tests/unittests/seresnext_test_base.py         |  7 +++++--
 .../unittests/test_fuse_all_reduce_pass.py         |  4 ++--
 .../unittests/test_fuse_elewise_add_act_pass.py    |  4 ++--
 .../tests/unittests/test_fuse_optimizer_pass.py    |  4 ++--
 .../test_fuse_relu_depthwise_conv_pass.py          |  4 ++--
 .../unittests/test_ir_memory_optimize_pass.py      |  4 ++--
 .../unittests/test_parallel_executor_mnist.py      |  8 ++++----
 .../unittests/test_parallel_executor_pg.py         |  4 ++--
 ...test_parallel_executor_seresnext_base_gpu.py    |  1 +
 ...rallel_executor_seresnext_with_reduce_cpu.py    |  8 ++++----
 11 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
index 46ab8f88511..ffd1ff4a16a 100644
--- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
+++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
@@ -68,8 +68,8 @@ class TestParallelExecutorBase(unittest.TestCase):
             feed_data_reader, FeedDataReader
         ), "feed_data_reader must be type of FeedDataReader"
 
-        paddle.seed(1)
-        paddle.framework.random._manual_program_seed(1)
+        paddle.seed(0)
+        paddle.framework.random._manual_program_seed(0)
         main = fluid.Program()
         startup = fluid.Program()
 
@@ -103,17 +103,24 @@ class TestParallelExecutorBase(unittest.TestCase):
         ) if use_device == DeviceType.XPU else int(
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
 
+        area_below_loss = 0
         begin = time.time()
         first_loss, = run_executor(exe=exe,
                                    binary=binary,
                                    feed=feed_dict,
                                    fetch_list=[loss.name])
+        area_below_loss += 0.5 * first_loss.mean()
         for _ in range(iter):
-            run_executor(exe=exe, binary=binary, feed=feed_dict, fetch_list=[])
+            mid_loss = run_executor(exe=exe,
+                                    binary=binary,
+                                    feed=feed_dict,
+                                    fetch_list=[loss.name])
+            area_below_loss += mid_loss[0].mean()
         last_loss, = run_executor(exe=exe,
                                   binary=binary,
                                   feed=feed_dict,
                                   fetch_list=[loss.name])
+        area_below_loss += 0.5 * last_loss.mean()
         end = time.time()
 
         if batch_size is not None:
@@ -126,9 +133,9 @@ class TestParallelExecutorBase(unittest.TestCase):
                 float(avg_first_loss_val)):
             sys.exit("got NaN loss, training failed.")
 
-        print(first_loss, last_loss)
+        print(first_loss, last_loss, area_below_loss)
         # self.assertGreater(first_loss[0], last_loss[0])
-        return first_loss, last_loss
+        return first_loss, last_loss, area_below_loss
 
     @classmethod
     def check_pass_conflict(cls,
diff --git a/python/paddle/fluid/tests/unittests/seresnext_test_base.py b/python/paddle/fluid/tests/unittests/seresnext_test_base.py
index f9113520131..d094114aae1 100644
--- a/python/paddle/fluid/tests/unittests/seresnext_test_base.py
+++ b/python/paddle/fluid/tests/unittests/seresnext_test_base.py
@@ -30,7 +30,7 @@ class TestResnetBase(TestParallelExecutorBase):
         if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
             return
 
-        func_1_first_loss, func_1_last_loss = self.check_network_convergence(
+        func_1_first_loss, func_1_last_loss, func_1_loss_area = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
@@ -39,7 +39,7 @@ class TestResnetBase(TestParallelExecutorBase):
            use_reduce=False,
            optimizer=seresnext_net.optimizer)
 
-        func_2_first_loss, func_2_last_loss = check_func(
+        func_2_first_loss, func_2_last_loss, func_2_loss_area = check_func(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
@@ -52,6 +52,9 @@ class TestResnetBase(TestParallelExecutorBase):
             for loss in zip(func_1_last_loss, func_2_last_loss):
                 self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
         else:
+            np.testing.assert_allclose(func_1_loss_area,
+                                       func_2_loss_area,
+                                       rtol=delta2)
             self.assertAlmostEquals(np.mean(func_1_first_loss),
                                     func_2_first_loss[0],
                                     delta=1e-5)
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
index c3e8a51397f..67729d6633d 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
@@ -48,7 +48,7 @@ class TestFuseAllReduceOpsBase(TestParallelExecutorBase):
         img, label = init_feed_dict()
         feed_dict_data = {"image": img, "label": label}
 
-        not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence(
+        not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict_data,
            get_data_from_feeder=get_data_from_feeder,
@@ -56,7 +56,7 @@ class TestFuseAllReduceOpsBase(TestParallelExecutorBase):
            fuse_all_reduce_ops=False,
            fuse_all_optimizer_ops=fuse_all_optimizer_ops,
            optimizer=optimizer)
-        fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence(
+        fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict_data,
            get_data_from_feeder=get_data_from_feeder,
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py
index 97fa40a89de..15b79bf0a7f 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py
@@ -42,7 +42,7 @@ class TestMNIST(TestParallelExecutorBase):
         # FIXME (liuwei12)
         # the new memory optimize strategy will crash this unittest
         # add enable_inplace=False here to force pass the unittest
-        not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence(
+        not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
@@ -53,7 +53,7 @@ class TestMNIST(TestParallelExecutorBase):
            use_ir_memory_optimize=False,
            enable_inplace=False,
            optimizer=_optimizer)
-        fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence(
+        fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
index b1451e83f9c..981d9dfcf4a 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
@@ -42,14 +42,14 @@ class TestFuseOptimizationOps(TestParallelExecutorBase):
         if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
             return
 
-        not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence(
+        not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict,
            get_data_from_feeder=get_data_from_feeder,
            use_device=use_device,
            fuse_all_optimizer_ops=False,
            optimizer=optimizer)
-        fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence(
+        fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict=feed_dict,
            get_data_from_feeder=get_data_from_feeder,
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py
index a86ca3e31f6..cddc05f5914 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py
@@ -91,7 +91,7 @@ class TestMNIST(TestParallelExecutorBase):
         if only_forward:
             _optimizer = None
 
-        fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence(
+        fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
@@ -101,7 +101,7 @@ class TestMNIST(TestParallelExecutorBase):
            fuse_relu_depthwise_conv=True,
            use_ir_memory_optimize=True,
            optimizer=_optimizer)
-        not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence(
+        not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py
index 24ac4630111..4b775197aae 100644
--- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py
@@ -66,7 +66,7 @@ class TestMNIST(TestParallelExecutorBase):
             return
         img, label = self._dummy_data()
 
-        first_loss0, last_loss0 = self.check_network_convergence(
+        first_loss0, last_loss0, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
@@ -74,7 +74,7 @@ class TestMNIST(TestParallelExecutorBase):
            },
            use_device=use_device,
            use_ir_memory_optimize=False)
-        first_loss1, last_loss1 = self.check_network_convergence(
+        first_loss1, last_loss1, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index 81625a29e22..2e2791351bf 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -91,7 +91,7 @@ class TestMNIST(TestParallelExecutorBase):
 
         img, label = init_data()
 
-        all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
+        all_reduce_first_loss, all_reduce_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
@@ -100,7 +100,7 @@ class TestMNIST(TestParallelExecutorBase):
            use_device=use_device,
            use_reduce=False)
 
-        reduce_first_loss, reduce_last_loss = self.check_network_convergence(
+        reduce_first_loss, reduce_last_loss, _ = self.check_network_convergence(
            model,
            feed_dict={
                "image": img,
@@ -153,7 +153,7 @@ class TestMNIST(TestParallelExecutorBase):
 
         img, label = init_data()
 
-        single_first_loss, single_last_loss = self.check_network_convergence(
+        single_first_loss, single_last_loss, _ = self.check_network_convergence(
            method=simple_fc_net,
            feed_dict={
                "image": img,
@@ -161,7 +161,7 @@ class TestMNIST(TestParallelExecutorBase):
            },
            use_device=use_device,
            use_parallel_executor=False)
-        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+        parallel_first_loss, parallel_last_loss, _ = self.check_network_convergence(
            method=simple_fc_net,
            feed_dict={
                "image": img,
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py
index 36299da25a6..1ada1469853 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py
@@ -55,7 +55,7 @@ class TestMNIST(TestParallelExecutorBase):
             return
         img, label = init_data()
 
-        single_first_loss, single_last_loss = self.check_network_convergence(
+        single_first_loss, single_last_loss, _ = self.check_network_convergence(
            method=simple_fc_net,
            feed_dict={
                "image": img,
@@ -63,7 +63,7 @@ class TestMNIST(TestParallelExecutorBase):
            },
            use_device=use_device,
            use_parallel_executor=False)
-        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+        parallel_first_loss, parallel_last_loss, _ = self.check_network_convergence(
            method=simple_fc_net,
            feed_dict={
                "image": img,
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py
index ee7736a7303..7b243605312 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py
@@ -31,6 +31,7 @@ class TestResnetGPU(TestResnetBase):
                                use_parallel_executor=False)
         self._compare_result_with_origin_model(check_func,
                                                use_device=DeviceType.CUDA,
+                                               delta2=1e-5,
                                                compare_separately=False)
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py
index d4cc297d689..f2bbbd9fe2a 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py
@@ -25,7 +25,7 @@ class TestResnetWithReduceBase(TestParallelExecutorBase):
         if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
             return
 
-        all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
+        all_reduce_first_loss, all_reduce_last_loss, _ = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
@@ -33,7 +33,7 @@
            use_device=use_device,
            use_reduce=False,
            optimizer=seresnext_net.optimizer)
-        reduce_first_loss, reduce_last_loss = self.check_network_convergence(
+        reduce_first_loss, reduce_last_loss, _ = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
@@ -50,7 +50,7 @@
         if not use_device:
             return
 
-        all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence(
+        all_reduce_first_loss_seq, all_reduce_last_loss_seq, _ = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
@@ -60,7 +60,7 @@
            use_device=use_device,
            use_reduce=True,
            optimizer=seresnext_net.optimizer,
            enable_sequential_execution=True)
-        reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence(
+        reduce_first_loss_seq, reduce_last_loss_seq, _ = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_device),
            iter=seresnext_net.iter(use_device),
-- 
GitLab