diff --git a/python/paddle/distributed/fleet/cloud_utils.py b/python/paddle/distributed/fleet/cloud_utils.py
index e05196f6314509e66a86212f250ecafe13de2b82..f5a24cf48ca06d1719ff1f788d1b2c06a667f541 100644
--- a/python/paddle/distributed/fleet/cloud_utils.py
+++ b/python/paddle/distributed/fleet/cloud_utils.py
@@ -22,7 +22,7 @@ def get_cloud_cluster(args_node_ips, devices_per_proc, args_port=6170):
     """
-    args_node_ips:string, device_mode:DeviceMode(IntEnum), device_per_proc:list, args_port: int
+    args_node_ips:string, device_mode:DeviceMode(int), devices_per_proc:list, args_port: int
     """
     #you can automatically get ip info while using paddlecloud multi nodes mode.
     node_ips = os.getenv("PADDLE_TRAINERS")
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 526d586f1c37333e1808ffdc2e278d966fffc85f..93c7d8a6ab9f609b0aadaab2bb7a1f2662d9e90a 100644
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -27,7 +27,6 @@ from contextlib import closing
 import socket
 import warnings
 import six
-from enum import IntEnum
 
 import paddle
 import paddle.fluid as fluid
@@ -35,7 +34,7 @@ logger = logging.getLogger("root")
 logger.propagate = False
 
 
-class DistributeMode(IntEnum):
+class DistributeMode():
     """
     There are various mode for fleetrun, each of them is designed for different model.
     """
@@ -44,7 +43,7 @@ class DistributeMode(IntEnum):
     PS_HETER = 2
 
 
-class DeviceMode(IntEnum):
+class DeviceMode():
     """
     Training devices type
     """