Unverified Commit a1d6e2a8 authored by W wanghuancoder, committed by GitHub

delete distributed old dygraph test cases (#49100)

* delete distributed old dygraph test cases
Parent 80fe8cbc
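For context: the `eager_mode` keyword argument removed throughout this diff told the distributed test launchers (`run_mnist_2gpu`, `check_with_place`) to re-run each script under the legacy, non-eager dygraph engine. The sketch below is an illustrative reconstruction of how such a flag typically reached the spawned trainers; the helper body and the FLAGS_enable_eager_mode environment variable are assumptions for illustration, not part of this commit.

# Hedged sketch, not part of this diff: an approximation of how the deleted
# eager_mode flag could drive a 2-GPU test launcher. FLAGS_enable_eager_mode
# and the launcher body are assumptions for illustration.
import copy
import os
import subprocess
import sys

def run_mnist_2gpu(target_file_name, eager_mode=True):
    env = copy.deepcopy(os.environ)
    # "0" selected the legacy dygraph engine; this commit deletes every call
    # that passed eager_mode=False, leaving only the eager-mode runs.
    env["FLAGS_enable_eager_mode"] = "1" if eager_mode else "0"
    env["CUDA_VISIBLE_DEVICES"] = "0,1"
    subprocess.check_call(
        [sys.executable, "-m", "paddle.distributed.launch", target_file_name],
        env=env,
    )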
@@ -20,7 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestParallelClassCenterSample(TestMultipleGpus):
     def test_parallel_class_center_sample(self):
         self.run_mnist_2gpu('parallel_class_center_sample.py')
-        self.run_mnist_2gpu('parallel_class_center_sample.py', eager_mode=False)


 if __name__ == "__main__":
......
@@ -20,7 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestModelParallelLayer(TestMultipleGpus):
     def test_hybrid_parallel_mp_layer(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_layers.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_layers.py', eager_mode=False)


 if __name__ == "__main__":
......
@@ -20,9 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestDataParallelLayer(TestMultipleGpus):
     def test_parallel_dygraph_dataparallel_no_sync(self):
         self.run_mnist_2gpu('parallel_dygraph_no_sync_gradient_check.py')
-        self.run_mnist_2gpu(
-            'parallel_dygraph_no_sync_gradient_check.py', eager_mode=False
-        )


 if __name__ == "__main__":
......
@@ -23,53 +23,33 @@ class TestHybridPipeParallel(TestMultipleGpus):
         self.run_mnist_2gpu(
             os.path.abspath('../../hybrid_parallel_pp_layer.py')
         )
-        self.run_mnist_2gpu(
-            os.path.abspath('../../hybrid_parallel_pp_layer.py'),
-            eager_mode=False,
-        )

     def test_hybrid_parallel_pp_tuple_inputs(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py', eager_mode=False)

     def test_hybrid_parallel_shared_weight(self):
         self.run_mnist_2gpu('hybrid_parallel_shared_weight.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_shared_weight.py', eager_mode=False
-        )

     def test_pipeline_parallel_amp(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_amp.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_amp.py', eager_mode=False)

     def test_pipeline_parallel_fp16(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_fp16.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_fp16.py', eager_mode=False)

     def test_hybrid_parallel_transformer(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_transformer.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_pp_transformer.py', eager_mode=False
-        )

     def test_hybrid_parallel_save_load(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_save_load.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_save_load.py', eager_mode=False)

     def test_hybrid_parallel_recompute(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_recompute.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_recompute.py', eager_mode=False)

     def test_hybrid_parallel_pp_clip_grad(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_clip_grad.py')
-        self.run_mnist_2gpu('hybrid_parallel_pp_clip_grad.py', eager_mode=False)

     def test_hybrid_parallel_transformer_unbalanced_data(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_transformer_unbalanced_data.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_pp_transformer_unbalanced_data.py',
-            eager_mode=False,
-        )


 if __name__ == "__main__":
......
@@ -22,9 +22,6 @@ class TestHybridParallel(TestMultipleGpus):
     # check sharding logic as well as the accuracy with single mode
     def test_hybrid_parallel_sharding_logic(self):
         self.run_mnist_2gpu('hybrid_parallel_sharding_model.py')
-        self.run_mnist_2gpu(
-            'hybrid_parallel_sharding_model.py', eager_mode=False
-        )


 if __name__ == "__main__":
......
@@ -20,23 +20,18 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestHybridParallel(TestMultipleGpus):
     def test_hybrid_parallel_mp_random(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_random.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_random.py', eager_mode=False)

     def test_hybrid_parallel_mp_model(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_model.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_model.py', eager_mode=False)

     def test_hybrid_parallel_mp_amp(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_amp.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_amp.py', eager_mode=False)

     def test_hybrid_parallel_mp_fp16(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_fp16.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_fp16.py', eager_mode=False)

     def test_hybrid_parallel_mp_clip_grad(self):
         self.run_mnist_2gpu('hybrid_parallel_mp_clip_grad.py')
-        self.run_mnist_2gpu('hybrid_parallel_mp_clip_grad.py', eager_mode=False)


 if __name__ == "__main__":
......
@@ -20,9 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
 class TestParallelMarginSoftmaxWithCrossEntropy(TestMultipleGpus):
     def test_parallel_margin_cross_entropy(self):
         self.run_mnist_2gpu('parallel_margin_cross_entropy.py')
-        self.run_mnist_2gpu(
-            'parallel_margin_cross_entropy.py', eager_mode=False
-        )


 if __name__ == "__main__":
......
@@ -29,15 +29,6 @@ class TestCollectiveGlobalGatherAPI(TestDistBase):
             "collective_global_gather.py", "global_gather", "nccl"
         )

-    def test_global_gather_nccl_dygraph(self):
-        self.check_with_place(
-            "collective_global_gather_dygraph.py",
-            "global_gather",
-            "nccl",
-            static_mode="0",
-            eager_mode=False,
-        )
-
     def test_global_gather_nccl_dygraph_eager(self):
         self.check_with_place(
             "collective_global_gather_dygraph.py",
......
@@ -29,15 +29,6 @@ class TestCollectiveSelectScatterAPI(TestDistBase):
             "collective_global_scatter.py", "global_scatter", "nccl"
         )

-    def test_global_scatter_nccl_dygraph(self):
-        self.check_with_place(
-            "collective_global_scatter_dygraph.py",
-            "global_scatter",
-            "nccl",
-            static_mode="0",
-            eager_mode=False,
-        )
-
     def test_global_scatter_nccl_dygraph_eager(self):
         self.check_with_place(
             "collective_global_scatter_dygraph.py",
......
@@ -213,19 +213,9 @@ class TestMultipleWithGloo(unittest.TestCase):
         time.sleep(3)


-class TestDataParallelGradientCheck(TestMultipleGpus):
-    def test_multiple_gpus_dynamic(self):
-        self.run_mnist_2gpu(
-            'parallel_dygraph_gradient_check.py', eager_mode=False
-        )
-
-
 class TestDataParallelWithPyLayer(TestMultipleGpus):
     def test_parallel_dygraph_dataparallel_with_pylayer(self):
         self.run_mnist_2gpu('parallel_dygraph_dataparallel_with_pylayer.py')
-        self.run_mnist_2gpu(
-            'parallel_dygraph_dataparallel_with_pylayer.py', eager_mode=False
-        )
         self.run_mnist_2gpu(
             'parallel_dygraph_dataparallel_with_pylayer.py',
             allocator_strategy="naive_best_fit",
......
@@ -133,15 +133,12 @@ class TestMultipleGpus(unittest.TestCase):
     def test_hapi_multiple_gpus_static(self):
         self.run_mnist_2gpu('dist_hapi_mnist_static.py')
-        self.run_mnist_2gpu('dist_hapi_mnist_static.py', eager_mode=False)

     def test_hapi_multiple_gpus_dynamic(self):
         self.run_mnist_2gpu('dist_hapi_mnist_dynamic.py')
-        self.run_mnist_2gpu('dist_hapi_mnist_dynamic.py', eager_mode=False)

     def test_hapi_amp_static(self):
         self.run_mnist_2gpu('dist_hapi_pure_fp16_static.py')
-        self.run_mnist_2gpu('dist_hapi_pure_fp16_static.py', eager_mode=False)


 if __name__ == "__main__":
......