Unverified commit 4b803a4a, authored by wanghuancoder, committed by GitHub

delete FLAGS_enable_eager_mode (#49036)

* delete FLAGS_enable_eager_mode
Parent 2afa7b93
@@ -462,8 +462,6 @@ class EagerPyLayerContext:
        Examples:
            .. code-block:: python

-                import os
-                os.environ['FLAGS_enable_eager_mode'] = '1'
                import paddle
                from paddle.autograd import PyLayer
                import numpy as np
@@ -503,8 +501,6 @@ class EagerPyLayerContext:
        Examples:
            .. code-block:: python

-                import os
-                os.environ['FLAGS_enable_eager_mode'] = '1'
                import paddle
                from paddle.autograd import PyLayer
                import numpy as np
...
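With eager mode always on, the PyLayer docstring examples above no longer need to set the flag before importing paddle. For context, here is a minimal runnable PyLayer example under the now-default eager mode (my own sketch, not the exact docstring code):

import paddle
from paddle.autograd import PyLayer


class CusTanh(PyLayer):
    @staticmethod
    def forward(ctx, x):
        y = paddle.tanh(x)
        ctx.save_for_backward(y)  # stash the output for backward
        return y

    @staticmethod
    def backward(ctx, dy):
        (y,) = ctx.saved_tensor()
        # d/dx tanh(x) = 1 - tanh(x)^2
        return dy * (1 - paddle.square(y))


x = paddle.randn([2, 3])
x.stop_gradient = False
y = CusTanh.apply(x)
y.sum().backward()
print(x.grad.shape)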
@@ -71,7 +71,7 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None
-_in_eager_mode_ = os.environ.get('FLAGS_enable_eager_mode', '1') == '1'
+_in_eager_mode_ = True
_global_expected_place_ = None
_current_device = None
global_prog_seed = 0
...
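The framework.py change above is the core of this commit: the eager-mode switch used to be resolved from the environment at import time, and is now unconditionally on. A condensed before/after sketch of just that line (my paraphrase of the two sides of the hunk):

import os

# Before this commit: the global was derived from the env var,
# defaulting to eager mode unless explicitly disabled.
_in_eager_mode_ = os.environ.get('FLAGS_enable_eager_mode', '1') == '1'

# After this commit: the toggle is gone and eager mode is unconditional.
_in_eager_mode_ = True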
@@ -57,9 +57,6 @@ def start_local_trainers(
        "PADDLE_DISTRI_CUSTOM_DEVICE_TYPE": "custom_cpu",
    }

-    if not eager_mode:
-        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
-
    current_env.update(proc_env)

    print("trainer proc env:{}".format(current_env))
...
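The launcher hunks in this commit all delete the same pattern: when a test requested legacy (non-eager) mode, the spawned trainer process received FLAGS_enable_eager_mode=0 in its environment. A minimal sketch of the now-deleted behavior, with a hypothetical start_trainer helper standing in for the various start_local_trainers variants:

import os
import subprocess


def start_trainer(cmd, eager_mode=True):
    proc_env = {}
    # Deleted pattern: legacy runs were requested by injecting the
    # flag into the child process environment.
    if not eager_mode:
        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0

    current_env = dict(os.environ)
    current_env.update(proc_env)
    print("trainer proc env:{}".format(current_env))
    return subprocess.Popen(cmd, env=current_env)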
@@ -517,7 +517,7 @@ endforeach()
set(TEST_EAGER_OPS test_jit_save_load test_translated_layer)
foreach(TEST_OP ${TEST_EAGER_OPS})
  list(REMOVE_ITEM TEST_OPS ${TEST_OP})
-  py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS FLAGS_enable_eager_mode=1)
+  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach()
if((NOT WITH_GPU)
@@ -1256,10 +1256,6 @@ py_test_modules(
set_tests_properties(test_add_reader_dependency_for_interpretercore
                     PROPERTIES TIMEOUT 120)
-py_test_modules(test_renorm_op_without_eager MODULES test_renorm_op ENVS
-                FLAGS_enable_eager_mode=0)
-set_tests_properties(test_renorm_op_without_eager PROPERTIES TIMEOUT 120)
py_test_modules(
  test_eager_deletion_padding_rnn_for_interpretercore MODULES
  test_eager_deletion_padding_rnn ENVS FLAGS_CONVERT_GRAPH_TO_PROGRAM=true)
...
@@ -51,20 +51,6 @@ if(WITH_NCCL)
      "120")
  endif()
endif()
-if(WITH_NCCL)
-  if((WITH_GPU) AND LOCAL_ALL_PLAT)
-    bash_test_modules(
-      test_dygraph_sharding_stage3
-      START_BASH
-      ../../dist_test.sh
-      LABELS
-      "RUN_TYPE=DIST"
-      ENVS
-      "PADDLE_DIST_UT_PORT=21202;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
-    )
-    set_tests_properties(test_dygraph_sharding_stage3 PROPERTIES TIMEOUT "350")
-  endif()
-endif()
if(WITH_NCCL)
  if(${NCCL_VERSION} VERSION_GREATER_EQUAL 2212)
    if((WITH_GPU) AND LOCAL_ALL_PLAT)
@@ -401,19 +387,6 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
    test_fleet_amp_init MODULES test_fleet_amp_init ENVS
    "http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
-if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
-  bash_test_modules(
-    test_dygraph_sharding_optimizer_stage2
-    START_BASH
-    ../../dist_test.sh
-    LABELS
-    "RUN_TYPE=DIST"
-    ENVS
-    "PADDLE_DIST_UT_PORT=21238;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
-  )
-  set_tests_properties(test_dygraph_sharding_optimizer_stage2 PROPERTIES TIMEOUT
-                       "120")
-endif()
if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
  py_test_modules(
    test_fleet_meta_optimizer_base MODULES test_fleet_meta_optimizer_base ENVS
@@ -667,18 +640,6 @@ if((WITH_GPU
    test_fleet_recompute_meta_optimizer ENVS
    "http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
-if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
-  bash_test_modules(
-    test_dygraph_group_sharded_api
-    START_BASH
-    ../../dist_test.sh
-    LABELS
-    "RUN_TYPE=DIST"
-    ENVS
-    "PADDLE_DIST_UT_PORT=21266;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
-  )
-  set_tests_properties(test_dygraph_group_sharded_api PROPERTIES TIMEOUT "120")
-endif()
if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
  py_test_modules(
    test_fleet_private_function MODULES test_fleet_private_function ENVS
@@ -760,14 +721,6 @@ if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
  set_tests_properties(test_parallel_dygraph_sync_batch_norm
                       PROPERTIES TIMEOUT "120" LABELS "RUN_TYPE=DIST")
endif()
-if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
-  py_test_modules(
-    test_imperative_auto_mixed_precision MODULES
-    test_imperative_auto_mixed_precision ENVS
-    "http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
-  set_tests_properties(test_imperative_auto_mixed_precision
-                       PROPERTIES TIMEOUT "300" LABELS "RUN_TYPE=DIST")
-endif()
if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
  py_test_modules(
    test_imperative_auto_mixed_precision_for_eager MODULES
@@ -781,11 +734,6 @@ if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
    test_mixed_precision MODULES test_mixed_precision ENVS
    "http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
-if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
-  py_test_modules(
-    test_dygraph_recompute MODULES test_dygraph_recompute ENVS
-    "http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
-endif()
if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
  py_test_modules(
    test_dygraph_recompute_for_eager MODULES test_dygraph_recompute_for_eager
...
@@ -33,6 +33,5 @@ class TestDistSaveLoad(unittest.TestCase):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    unittest.main()

-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-os.environ['FLAGS_enable_eager_mode'] = '0'
-
-import unittest
-
-from test_parallel_dygraph_dataparallel import TestMultipleGpus
-
-
-class TestDygraphGroupSharded(TestMultipleGpus):
-    # check group sharded logic as well as the accuracy with single mode
-    def test_dygraph_group_sharded(self):
-        self.run_mnist_2gpu('dygraph_group_sharded_api.py', eager_mode=False)
-
-
-if __name__ == "__main__":
-    unittest.main()

@@ -12,10 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
-
-os.environ['FLAGS_enable_eager_mode'] = '1'
-
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus
...
@@ -12,10 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
-
-os.environ['FLAGS_enable_eager_mode'] = '1'
-
import random
import unittest
...
@@ -44,6 +44,5 @@ class TestSingleCard(unittest.TestCase):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    unittest.main()

-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-from test_parallel_dygraph_dataparallel import TestMultipleGpus
-
-
-class TestDygraphShardingOptimizerStage2(TestMultipleGpus):
-    # check sharding logic as well as the accuracy with single mode
-    def test_dygraph_sharding_optimizer_stage2(self):
-        self.run_mnist_2gpu(
-            'dygraph_sharding_optimizer_stage2.py', eager_mode=False
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -23,18 +22,13 @@ class TestDygraphShardingStage2(TestMultipleGpus):
    # check sharding logic as well as the accuracy with single mode
    def test_dygraph_sharding_stage2(self):
        self.run_mnist_2gpu('dygraph_group_sharded_stage2.py')
-        self.run_mnist_2gpu('dygraph_sharding_stage2.py', eager_mode=False)

    def test_dygraph_sharding_stage2_offload(self):
        self.run_mnist_2gpu('dygraph_group_sharded_stage2_offload.py')
-        self.run_mnist_2gpu(
-            'dygraph_sharding_stage2_offload.py', eager_mode=False
-        )

    def test_dygraph_sharding_stage2_with_comm_overlap(self):
        self.run_mnist_2gpu('dygraph_group_sharded_stage2_comm_overlap.py')


if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()

-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-os.environ['FLAGS_enable_eager_mode'] = '0'
-
-import unittest
-
-from test_parallel_dygraph_dataparallel import TestMultipleGpus
-
-
-class TestDygraphShardingStage3(TestMultipleGpus):
-    # check sharding logic as well as the accuracy with single mode
-    def test_dygraph_sharding_stage3(self):
-        self.run_mnist_2gpu('dygraph_sharding_stage3.py', eager_mode=False)
-
-    def test_dygraph_sharding_stage3_offload(self):
-        self.run_mnist_2gpu(
-            'dygraph_sharding_stage3_offload.py', eager_mode=False
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()

@@ -12,11 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
-
-os.environ['FLAGS_enable_eager_mode'] = '1'
-
-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -33,5 +28,4 @@ class TestDygraphShardingStage3(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -13,9 +13,6 @@
# limitations under the License.

import os

-os.environ['FLAGS_enable_eager_mode'] = '1'
-
import tempfile
import unittest
...
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -25,5 +24,4 @@ class TestParallelClassCenterSample(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -25,5 +24,4 @@ class TestModelParallelLayer(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -27,5 +26,4 @@ class TestDataParallelLayer(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -73,5 +73,4 @@ class TestHybridPipeParallel(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -34,5 +33,4 @@ class TestHybridPipeParallelWithVirtualStage(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -78,9 +78,6 @@ def start_local_trainers(
        "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
    }

-    if not eager_mode:
-        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
-
    current_env.update(proc_env)

    print("trainer proc env:{}".format(current_env))
...
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -29,5 +28,4 @@ class TestHybridParallel(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -41,5 +40,4 @@ class TestHybridParallel(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -27,5 +26,4 @@ class TestParallelMarginSoftmaxWithCrossEntropy(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -4,7 +4,6 @@ test_fleet_static_mp_layers,LINUX;WIN32,,,,test_runner.py,2,,http_proxy=;https_p
test_dgc_op,,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_DGC
test_dgc_optimizer,,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_DGC
test_parallel_margin_cross_entropy,,GPU,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_NCCL
-test_dygraph_sharding_stage3,,GPU,350,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_NCCL
test_parallel_dygraph_transformer,,GPU,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_NCCL;${NCCL_VERSION} VERSION_GREATER_EQUAL 2212
test_parallel_dygraph_transformer,,ROCM,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_fp16_allreduce_meta_optimizer,LINUX;WIN32,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
@@ -34,7 +33,6 @@ test_communicator_sync,,,,,test_runner.py,2,,FLAGS_communicator_send_queue_size=
test_fleet_pipeline_meta_optimizer,,GPU;XPU;ASCEND;ASCEND_CL,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_gradient_merge_meta_optimizer,,GPU;XPU;ASCEND;ASCEND_CL,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_amp_init,LINUX;WIN32,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
-test_dygraph_sharding_optimizer_stage2,,,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_meta_optimizer_base,LINUX;WIN32,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_raw_program_meta_optimizer,,GPU;XPU;ASCEND;ASCEND_CL,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_parallel_dygraph_sharding_parallel,,,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
@@ -57,7 +55,6 @@ test_parallel_dygraph_sparse_embedding_over_height,,ROCM,350,DIST,../../dist_tes
test_distributed_strategy,LINUX;APPLE,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_auto_parallel_parallelizer,,,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_recompute_meta_optimizer,LINUX;WIN32,GPU;XPU;ASCEND;ASCEND_CL,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
-test_dygraph_group_sharded_api,,,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_fleet_private_function,LINUX;WIN32,,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_new_group,,GPU;XPU;ASCEND;ASCEND_CL,,DIST,test_new_group.sh,2,,http_proxy=;https_proxy=,
test_c_comm_init_op,LINUX,GPU;XPU;ASCEND;ASCEND_CL,120,DIST,test_c_comm_init_op.sh,2,,http_proxy=;https_proxy=,
@@ -65,10 +62,8 @@ test_ir_pass_pipeline,,,120,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;
test_parallel_dygraph_mnist,,GPU;ROCM,200,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_parallel_dygraph_se_resnext,,GPU;ROCM,200,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_parallel_dygraph_sync_batch_norm,,GPU;ROCM,120,DIST,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
-test_imperative_auto_mixed_precision,,GPU;ROCM,300,DIST,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_imperative_auto_mixed_precision_for_eager,,GPU;ROCM,300,DIST,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_mixed_precision,,GPU;ROCM,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
-test_dygraph_recompute,,GPU;ROCM,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_dygraph_recompute_for_eager,,GPU;ROCM,,,test_runner.py,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,
test_dist_mnist_dgc_nccl,,,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_NCCL OR WITH_RCCL;WITH_DGC
test_dist_se_resnext_dgc,,,,DIST,../../dist_test.sh,2,,http_proxy=;https_proxy=;PYTHONPATH=../..,WITH_NCCL OR WITH_RCCL;WITH_DGC
...
@@ -77,10 +77,6 @@ class TestDistBase(unittest.TestCase):
        required_envs["GLOG_logtostderr"] = "1"
        required_envs["GLOO_LOG_LEVEL"] = "TRACE"

-        if eager_mode:
-            required_envs["FLAGS_enable_eager_mode"] = "%d" % 1
-        else:
-            required_envs["FLAGS_enable_eager_mode"] = "%d" % 0

        self._run_cluster(model_file, required_envs)

    def _run_cluster(self, model_file, envs):
...
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -24,5 +23,4 @@ class TestCollectiveAllToAllSingle(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -24,5 +23,4 @@ class TestCollectiveBatchIsendIrecv(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
import unittest

from test_parallel_dygraph_dataparallel import TestMultipleGpus

@@ -24,5 +23,4 @@ class TestCollectiveReduceScatter(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -4,7 +4,7 @@ file(
  "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
set(GC_ENVS FLAGS_eager_delete_tensor_gb=0.0)
-set(DY2ST_EAGER_TEST_ENVS ${GC_ENVS} FLAGS_enable_eager_mode=1)
+set(DY2ST_EAGER_TEST_ENVS ${GC_ENVS})
set(TEST_EAGER_OPS
    test_bmn

@@ -35,7 +35,7 @@ list(REMOVE_ITEM TEST_OPS test_lac)
# NOTE(Aurelius84): In case of Windows CI, if open ON_INFER, RWLOCK of Scope
# will be removed and will cause some random failed in multi-thread.
if(WITH_PYTHON)
-  py_test_modules(test_lac MODULES test_lac ENVS FLAGS_enable_eager_mode=1)
+  py_test_modules(test_lac MODULES test_lac)
  set_tests_properties(test_lac PROPERTIES TIMEOUT 120)
endif()
...
@@ -12,10 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
-
-os.environ["FLAGS_enable_eager_mode"] = "0"
import math
+import os
import tempfile
import time
import unittest
...
@@ -327,11 +327,6 @@ class TestDistBase(unittest.TestCase):
                'NVIDIA_TF32_OVERRIDE', ''
            )

-            if eager_mode:
-                required_envs["FLAGS_enable_eager_mode"] = "%d" % 1
-            else:
-                required_envs["FLAGS_enable_eager_mode"] = "%d" % 0
-
            tr0_out, tr1_out, pid0, pid1 = self._run_cluster(
                model_file, required_envs
            )
...
@@ -124,9 +124,6 @@ def start_local_trainers(
        "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
    }

-    if not eager_mode:
-        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
-
    proc_env["FLAGS_allocator_strategy"] = allocator_strategy
    if allocator_strategy == "auto_growth":
        proc_env["FLAGS_fraction_of_gpu_memory_to_use"] = "0.1"

@@ -241,5 +238,4 @@ class TestGradientCheckInEagerMode(TestMultipleGpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -80,9 +80,6 @@ def start_local_trainers(
        "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
    }

-    if not eager_mode:
-        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
-
    current_env.update(proc_env)

    print("trainer proc env:{}".format(current_env))

@@ -152,7 +149,6 @@ class TestGradientCheckInEagerMode(TestMultipleXpus):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    os.environ["BKCL_PCIE_RING"] = "1"
    os.environ["BKCL_CCIX_RING"] = "0"
    unittest.main()
@@ -1558,8 +1558,6 @@ class TracedLayer:
        Examples:
            .. code-block:: python:

-                import os
-                os.environ['FLAGS_enable_eager_mode'] = '0'
                import paddle

                class ExampleLayer(paddle.nn.Layer):

@@ -1610,8 +1608,6 @@ class TracedLayer:
        Examples:
            .. code-block:: python:

-                import os
-                os.environ['FLAGS_enable_eager_mode'] = '0'
                import paddle

                class ExampleLayer(paddle.nn.Layer):

@@ -1716,8 +1712,6 @@ class TracedLayer:
        Examples:
            .. code-block:: python:

-                import os
-                os.environ['FLAGS_enable_eager_mode'] = '0'
                import numpy as np
                import paddle
...
@@ -78,9 +78,6 @@ def start_local_trainers(
        "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
    }

-    if not eager_mode:
-        proc_env["FLAGS_enable_eager_mode"] = "%d" % 0
-
    current_env.update(proc_env)

    print("trainer proc env:{}".format(current_env))

@@ -148,5 +145,4 @@ class TestMultipleGpus(unittest.TestCase):

if __name__ == "__main__":
-    os.environ["FLAGS_enable_eager_mode"] = "1"
    unittest.main()
@@ -47,7 +47,6 @@ def classify_cases_by_mem(rootPath):
    case_exec_200 = [
        'test_post_training_quantization_mnist',
-        'test_imperative_auto_mixed_precision',
        'test_trt_dynamic_shape_ernie_fp16_ser_deser',
        'test_trt_dynamic_shape_ernie',
        'test_layer_norm_op',
...
@@ -1033,7 +1033,6 @@ FOURTH_HIGH_PARALLEL_JOB_NEW = [
    'test_elementwise_div_grad_grad',
    'test_minus_op',
    'test_shard_index_op',
-    'test_dygraph_recompute',
    'test_momentum_op',
    'test_modelaverage',
    'test_compare_reduce_op',

@@ -2406,7 +2405,6 @@ TETRAD_PARALLEL_JOB = [
    'test_matrix_rank_op',
    'test_margin_cross_entropy_op',
    'test_elementwise_pow_op',
-    'test_dygraph_recompute',
    'test_qr_op',
    'test_dygraph_spectral_norm',
    'test_cumsum_op',
...
@@ -239,7 +239,6 @@ long_time_test="^test_gru_op$|\
^test_sequence_conv$|\
^test_sgd_op$|\
^test_transformer$|\
-^test_imperative_auto_mixed_precision$|\
^test_trt_matmul_quant_dequant$|\
^test_strided_slice_op$"
...