From a1d6e2a8ee6888c259c0212098958db08079d2de Mon Sep 17 00:00:00 2001
From: wanghuancoder
Date: Thu, 22 Dec 2022 15:50:19 +0800
Subject: [PATCH] delete distribute old dygraph test case (#49100)

* delete distribute old dygraph test case

---
 .../test_parallel_class_center_sample.py      |  1 -
 .../fleet/test_parallel_dygraph_mp_layers.py  |  1 -
 ...parallel_dygraph_no_sync_gradient_check.py |  3 ---
 ...test_parallel_dygraph_pipeline_parallel.py | 20 -------------------
 ...test_parallel_dygraph_sharding_parallel.py |  3 ---
 .../test_parallel_dygraph_tensor_parallel.py  |  5 -----
 .../test_parallel_margin_cross_entropy.py     |  3 ---
 .../test_collective_global_gather.py          |  9 ---------
 .../test_collective_global_scatter.py         |  9 ---------
 .../test_parallel_dygraph_dataparallel.py     | 10 ----------
 python/paddle/tests/test_dist_hapi_model.py   |  3 ---
 11 files changed, 67 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py
index 6ff1d0c7ed..91d3080bff 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py
@@ -20,7 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestParallelClassCenterSample(TestMultipleGpus):
     def test_parallel_class_center_sample(self):
         self.run_mnist_2gpu('parallel_class_center_sample.py')
-        self.run_mnist_2gpu('parallel_class_center_sample.py', eager_mode=False)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py
index f4491d26fd..2cc956f824 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py
@@ -20,7 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestModelParallelLayer(TestMultipleGpus):
     def test_hybrid_parallel_mp_layer(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_layers.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_layers.py', eager_mode=False)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py
index e06d85fc84..33f15476aa 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py
@@ -20,9 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestDataParallelLayer(TestMultipleGpus):
     def test_parallel_dygraph_dataparallel_no_sync(self):
         self.run_mnist_2gpu('parallel_dygraph_no_sync_gradient_check.py')
-        self.run_mnist_2gpu(
-            'parallel_dygraph_no_sync_gradient_check.py', eager_mode=False
-        )
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py
index 8341cfef43..4bf669dbe5 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py
@@ -23,53 +23,33 @@ class TestHybridPipeParallel(TestMultipleGpus):
         self.run_mnist_2gpu(
             os.path.abspath('../../hybrid_parallel_pp_layer.py')
         )
-        self.run_mnist_2gpu(
-            os.path.abspath('../../hybrid_parallel_pp_layer.py'),
-            eager_mode=False,
-        )
 
     def test_hybrid_parallel_pp_tuple_inputs(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py', eager_mode=False)
 
     def test_hybrid_parallel_shared_weight(self):
         self.run_mnist_2gpu('hybrid_parallel_shared_weight.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_shared_weight.py', eager_mode=False
-        )
 
     def test_pipeline_parallel_amp(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_amp.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_amp.py', eager_mode=False)
 
     def test_pipeline_parallel_fp16(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_fp16.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_fp16.py', eager_mode=False)
 
     def test_hybrid_parallel_transformer(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_transformer.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_pp_transformer.py', eager_mode=False
-        )
 
     def test_hybrid_parallel_save_load(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_save_load.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_save_load.py', eager_mode=False)
 
     def test_hybrid_parallel_recompute(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_recompute.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_recompute.py', eager_mode=False)
 
     def test_hybrid_parallel_pp_clip_grad(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_clip_grad.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_clip_grad.py', eager_mode=False)
 
     def test_hybrid_parallel_transformer_unbalanced_data(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_transformer_unbalanced_data.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_pp_transformer_unbalanced_data.py',
-            eager_mode=False,
-        )
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py
index fcfaf72c44..6d12fec850 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py
@@ -22,9 +22,6 @@ class TestHybridParallel(TestMultipleGpus):
     # check sharding logic as well as the accuracy with single mode
     def test_hybrid_parallel_sharding_logic(self):
         self.run_mnist_2gpu('hybrid_parallel_sharding_model.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_sharding_model.py', eager_mode=False
-        )
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py
index 575ff56350..45235965c2 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py
@@ -20,23 +20,18 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestHybridParallel(TestMultipleGpus):
     def test_hybrid_parallel_mp_random(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_random.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_random.py', eager_mode=False)
 
     def test_hybrid_parallel_mp_model(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_model.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_model.py', eager_mode=False)
 
     def test_hybrid_parallel_mp_amp(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_amp.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_amp.py', eager_mode=False)
 
     def test_hybrid_parallel_mp_fp16(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_fp16.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_fp16.py', eager_mode=False)
 
     def test_hybrid_parallel_mp_clip_grad(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_clip_grad.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_clip_grad.py', eager_mode=False)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py
index c53b73d5ae..a9a738ac9a 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py
@@ -20,9 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestParallelMarginSoftmaxWithCrossEntropy(TestMultipleGpus):
     def test_parallel_margin_cross_entropy(self):
         self.run_mnist_2gpu('parallel_margin_cross_entropy.py')
-        self.run_mnist_2gpu(
-            'parallel_margin_cross_entropy.py', eager_mode=False
-        )
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py b/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py
index 7de40b6d81..8ec8c61e13 100644
--- a/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py
+++ b/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py
@@ -29,15 +29,6 @@ class TestCollectiveGlobalGatherAPI(TestDistBase):
             "collective_global_gather.py", "global_gather", "nccl"
         )
 
-    def test_global_gather_nccl_dygraph(self):
-        self.check_with_place(
-            "collective_global_gather_dygraph.py",
-            "global_gather",
-            "nccl",
-            static_mode="0",
-            eager_mode=False,
-        )
-
     def test_global_gather_nccl_dygraph_eager(self):
         self.check_with_place(
             "collective_global_gather_dygraph.py",
diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py b/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py
index c4575043b2..b385f6c077 100644
--- a/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py
+++ b/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py
@@ -29,15 +29,6 @@ class TestCollectiveSelectScatterAPI(TestDistBase):
             "collective_global_scatter.py", "global_scatter", "nccl"
         )
 
-    def test_global_scatter_nccl_dygraph(self):
-        self.check_with_place(
-            "collective_global_scatter_dygraph.py",
-            "global_scatter",
-            "nccl",
-            static_mode="0",
-            eager_mode=False,
-        )
-
     def test_global_scatter_nccl_dygraph_eager(self):
         self.check_with_place(
             "collective_global_scatter_dygraph.py",
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py
index 8a460152f9..5c03d758a9 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py
@@ -213,19 +213,9 @@ class TestMultipleWithGloo(unittest.TestCase):
         time.sleep(3)
 
 
-class TestDataParallelGradientCheck(TestMultipleGpus):
-    def test_multiple_gpus_dynamic(self):
-        self.run_mnist_2gpu(
-            'parallel_dygraph_gradient_check.py', eager_mode=False
-        )
-
-
 class TestDataParallelWithPyLayer(TestMultipleGpus):
     def test_parallel_dygraph_dataparallel_with_pylayer(self):
         self.run_mnist_2gpu('parallel_dygraph_dataparallel_with_pylayer.py')
-        self.run_mnist_2gpu(
-            'parallel_dygraph_dataparallel_with_pylayer.py', eager_mode=False
-        )
         self.run_mnist_2gpu(
             'parallel_dygraph_dataparallel_with_pylayer.py',
             allocator_strategy="naive_best_fit",
diff --git a/python/paddle/tests/test_dist_hapi_model.py b/python/paddle/tests/test_dist_hapi_model.py
index 4d759f9720..1707d2ea40 100644
--- a/python/paddle/tests/test_dist_hapi_model.py
+++ b/python/paddle/tests/test_dist_hapi_model.py
@@ -133,15 +133,12 @@ class TestMultipleGpus(unittest.TestCase):
 
     def test_hapi_multiple_gpus_static(self):
         self.run_mnist_2gpu('dist_hapi_mnist_static.py')
-        self.run_mnist_2gpu('dist_hapi_mnist_static.py', eager_mode=False)
 
     def test_hapi_multiple_gpus_dynamic(self):
         self.run_mnist_2gpu('dist_hapi_mnist_dynamic.py')
-        self.run_mnist_2gpu('dist_hapi_mnist_dynamic.py', eager_mode=False)
 
     def test_hapi_amp_static(self):
         self.run_mnist_2gpu('dist_hapi_pure_fp16_static.py')
-        self.run_mnist_2gpu('dist_hapi_pure_fp16_static.py', eager_mode=False)
 
 
 if __name__ == "__main__":
-- 
GitLab
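
Note: every call deleted in this patch re-ran the same trainer script with eager_mode=False, i.e. under the legacy (non-eager) dygraph engine that is being retired. As a minimal, hypothetical sketch of what that toggle did in the test launcher (the helper run_script_2gpu and the FLAGS_enable_eager_mode variable below are illustrative assumptions, not the actual Paddle harness):

    import os
    import subprocess
    import sys

    def run_script_2gpu(target_file_name, eager_mode=True):
        # Launch the trainer script with two visible GPUs. The old harness
        # selected the legacy dygraph engine by disabling eager mode via an
        # environment flag before spawning the child process (assumed flag
        # name; illustrative only).
        env = dict(
            os.environ,
            CUDA_VISIBLE_DEVICES="0,1",
            FLAGS_enable_eager_mode="1" if eager_mode else "0",
        )
        subprocess.check_call([sys.executable, target_file_name], env=env)

    # With the eager_mode=False re-runs removed, each test exercises only
    # the default eager engine:
    # run_script_2gpu('parallel_class_center_sample.py')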