From 994217ea057c8bd9d21f63a3ccccceed4947518c Mon Sep 17 00:00:00 2001 From: Dong Daxiang <35550832+guru4elephant@users.noreply.github.com> Date: Sat, 29 Aug 2020 18:53:52 +0800 Subject: [PATCH] =?UTF-8?q?=E3=80=90paddle.fleet=E3=80=91fix=20api=20docum?= =?UTF-8?q?ents=20(#26777)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix api document --- python/paddle/distributed/__init__.py | 7 +++++-- python/paddle/distributed/fleet/__init__.py | 5 ++--- .../distributed/fleet/base/distributed_strategy.py | 2 ++ python/paddle/distributed/fleet/base/fleet_base.py | 2 +- .../fleet/base/meta_optimizer_factory.py | 2 -- python/paddle/distributed/fleet/base/role_maker.py | 2 +- .../distributed/fleet/meta_optimizers/__init__.py | 14 -------------- .../fleet/meta_optimizers/amp_optimizer.py | 2 -- .../fleet/meta_optimizers/dgc_optimizer.py | 2 -- .../meta_optimizers/gradient_merge_optimizer.py | 4 ---- .../fleet/meta_optimizers/lamb_optimizer.py | 2 -- .../fleet/meta_optimizers/lars_optimizer.py | 2 -- .../fleet/meta_optimizers/meta_optimizer_base.py | 2 -- .../fleet/meta_optimizers/pipeline_optimizer.py | 2 -- .../fleet/meta_optimizers/recompute_optimizer.py | 2 -- .../paddle/distributed/fleet/metrics/__init__.py | 13 +++++++++++++ .../paddle/distributed/fleet/runtime/__init__.py | 2 -- python/paddle/distributed/fleet/utils/__init__.py | 2 +- 18 files changed, 25 insertions(+), 44 deletions(-) diff --git a/python/paddle/distributed/__init__.py b/python/paddle/distributed/__init__.py index d66577102c..b7357eef7a 100644 --- a/python/paddle/distributed/__init__.py +++ b/python/paddle/distributed/__init__.py @@ -30,8 +30,11 @@ __all__ = ["spawn"] # dygraph parallel apis __all__ += [ - "init_parallel_env", "get_rank", "get_world_size", "prepare_context", - "ParallelEnv" + "init_parallel_env", + "get_rank", + "get_world_size", + "prepare_context", + "ParallelEnv", ] # collective apis diff --git a/python/paddle/distributed/fleet/__init__.py b/python/paddle/distributed/fleet/__init__.py index b080fb1755..42ac68ba1a 100644 --- a/python/paddle/distributed/fleet/__init__.py +++ b/python/paddle/distributed/fleet/__init__.py @@ -18,16 +18,15 @@ from .base.distributed_strategy import DistributedStrategy from .base.fleet_base import Fleet from .base.util_factory import UtilBase from .dataset import * +#from . 
import metrics __all__ = [ "DistributedStrategy", "UtilBase", "DatasetFactory", - "DatasetBase", - "InMemoryDataset", - "QueueDataset", "UserDefinedRoleMaker", "PaddleCloudRoleMaker", + "Fleet", ] fleet = Fleet() diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py index 5e527ea03a..26063d1b8a 100755 --- a/python/paddle/distributed/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -17,6 +17,8 @@ from paddle.distributed.fleet.proto import distributed_strategy_pb2 from paddle.fluid.framework import Variable, set_flags, core import google.protobuf.text_format +__all__ = ["DistributedStrategy"] + def get_msg_dict(msg): res_dict = {} diff --git a/python/paddle/distributed/fleet/base/fleet_base.py b/python/paddle/distributed/fleet/base/fleet_base.py index a6286bcca8..eb2cb19eae 100644 --- a/python/paddle/distributed/fleet/base/fleet_base.py +++ b/python/paddle/distributed/fleet/base/fleet_base.py @@ -22,7 +22,7 @@ from .runtime_factory import RuntimeFactory from .util_factory import UtilFactory from paddle.fluid.wrapped_decorator import wrap_decorator -__all__ = ['Fleet'] +#__all__ = ['Fleet'] def _inited_runtime_handler_(func): diff --git a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py index 459070fcc4..f845b3fcd8 100755 --- a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py +++ b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -__all__ = ["MetaOptimizerFactory"] - from ..meta_optimizers import * meta_optimizer_names = list( diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 6aeeb4a289..3d159a6312 100644 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -17,7 +17,7 @@ import numpy as np from multiprocessing import Process, Manager import paddle.fluid as fluid -__all__ = ['RoleMakerBase', 'UserDefinedRoleMaker', 'PaddleCloudRoleMaker'] +#__all__ = ['UserDefinedRoleMaker', 'PaddleCloudRoleMaker'] class Role: diff --git a/python/paddle/distributed/fleet/meta_optimizers/__init__.py b/python/paddle/distributed/fleet/meta_optimizers/__init__.py index 075e8b6c43..78b2b8117b 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/__init__.py +++ b/python/paddle/distributed/fleet/meta_optimizers/__init__.py @@ -22,17 +22,3 @@ from .lars_optimizer import LarsOptimizer from .async_graph_execution_optimizer import AsyncGraphExecutionOptimizer from .dgc_optimizer import DGCOptimizer from .lamb_optimizer import LambOptimizer - -__all__ = [ - 'AMPOptimizer', - 'RecomputeOptimizer', - 'GradientMergeOptimizer', - 'AsyncMetaOptimizer', - 'GraphExecutionOptimizer', - 'PipelineOptimizer', - 'LocalSGDOptimizer', - 'LarsOptimizer', - 'AsyncGraphExecutionOptimizer', - 'DGCOptimizer', - 'LambOptimizer', -] diff --git a/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py index 66db14209b..b1952276e4 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py @@ -14,8 +14,6 @@ import paddle.fluid.contrib.mixed_precision as mixed_precision from 
.meta_optimizer_base import MetaOptimizerBase -__all__ = ["AMPOptimizer"] - class AMPOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py index f34786f9dc..f1c6defc5c 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py @@ -15,8 +15,6 @@ from paddle.fluid.optimizer import Momentum, DGCMomentumOptimizer from .meta_optimizer_base import MetaOptimizerBase import logging -__all__ = ["DGCOptimizer"] - class DGCOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py index bd52179a35..7db79ad7b5 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py @@ -14,10 +14,6 @@ from paddle.fluid.optimizer import GradientMergeOptimizer as GM from .meta_optimizer_base import MetaOptimizerBase -__all__ = ["GradientMergeOptimizer"] - -# amp + gradient merge + lamb - class GradientMergeOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py index 7e08a02eb1..9fa29c4078 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py @@ -16,8 +16,6 @@ from paddle.fluid.optimizer import LambOptimizer as LAMB from .meta_optimizer_base import MetaOptimizerBase import logging -__all__ = ["LambOptimizer"] - class LambOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py index 09c418fa79..a7b856ff5b 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py @@ -15,8 +15,6 @@ from paddle.fluid.optimizer import Momentum, LarsMomentumOptimizer from .meta_optimizer_base import MetaOptimizerBase import logging -__all__ = ["LarsOptimizer"] - class LarsOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py index 12a4d90434..073148e11a 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py +++ b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__all__ = ["MetaOptimizerBase"] - from paddle.fluid.optimizer import Optimizer diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py index fe9221307c..d5a45e2b4e 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py @@ -20,8 +20,6 @@ from paddle.fluid.optimizer import PipelineOptimizer as PO from .meta_optimizer_base import MetaOptimizerBase from .common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY, CollectiveHelper, is_update_op, is_loss_grad_op, is_backward_op, is_optimizer_op -__all__ = ["PipelineOptimizer"] - class PipelineHelper(CollectiveHelper): def __init__(self, role_maker, nrings=1, wait_port='6174'): diff --git a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py index 45130b4471..3eb3ca6127 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py @@ -14,8 +14,6 @@ from paddle.fluid.optimizer import RecomputeOptimizer as RO from .meta_optimizer_base import MetaOptimizerBase -__all__ = ["RecomputeOptimizer"] - class RecomputeOptimizer(MetaOptimizerBase): def __init__(self, optimizer): diff --git a/python/paddle/distributed/fleet/metrics/__init__.py b/python/paddle/distributed/fleet/metrics/__init__.py index abf198b97e..bc30c06378 100644 --- a/python/paddle/distributed/fleet/metrics/__init__.py +++ b/python/paddle/distributed/fleet/metrics/__init__.py @@ -11,3 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from .metric import * + +__all__ = [ + "sum", + "max", + "min", + "auc", + "mae", + "rmse", + "mse", + "acc", +] diff --git a/python/paddle/distributed/fleet/runtime/__init__.py b/python/paddle/distributed/fleet/runtime/__init__.py index a796a73fc9..cf718b199e 100644 --- a/python/paddle/distributed/fleet/runtime/__init__.py +++ b/python/paddle/distributed/fleet/runtime/__init__.py @@ -14,5 +14,3 @@ from .collective_runtime import CollectiveRuntime from .parameter_server_runtime import ParameterServerRuntime - -__all__ = ["CollectiveRuntime," "ParameterServerRuntime", ] diff --git a/python/paddle/distributed/fleet/utils/__init__.py b/python/paddle/distributed/fleet/utils/__init__.py index 212308159a..f1911408c8 100644 --- a/python/paddle/distributed/fleet/utils/__init__.py +++ b/python/paddle/distributed/fleet/utils/__init__.py @@ -15,4 +15,4 @@ from .fs import * from .http_server import KVHandler, KVHTTPServer, KVServer -__all__ = ['KVHandler', 'KVHTTPServer', 'KVServer'] + fs.__all__ +#__all__ = ['KVHandler', 'KVHTTPServer', 'KVServer'] + fs.__all__ -- GitLab