From 09eaa7d7006ea5e727d0b92d0165ad69d8939663 Mon Sep 17 00:00:00 2001
From: Haohongxiang <86215757+haohongxiang@users.noreply.github.com>
Date: Wed, 15 Sep 2021 04:12:06 -0500
Subject: [PATCH] fix bugs of PR 35401 (#35746)

---
 python/paddle/distributed/fleet/utils/hybrid_parallel_util.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
index 94dd29f746..0f5c24f022 100644
--- a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
+++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
@@ -19,7 +19,6 @@ import warnings
 from paddle import framework
 import paddle
 from paddle.fluid import core
-import paddle.distributed as dist
 from paddle.fluid.dygraph.parallel import _split_tensors, sync_params_buffers, build_groups
 from collections import OrderedDict
 from .log_util import logger
@@ -45,7 +44,7 @@ def _apply_collective_grads(parameters, comm_group):
 
     for coalesced_grad, _, _ in coalesced_grads_and_vars:
         # need to div nranks
-        nranks = dist.get_world_size(
+        nranks = paddle.distributed.get_world_size(
         ) if comm_group is None else comm_group.nranks
         div_factor = paddle.to_tensor(nranks, dtype=coalesced_grad.dtype)
         paddle.fluid.framework._dygraph_tracer().trace_op(
-- 
GitLab
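
For context, the patched hunk is the gradient-averaging step of _apply_collective_grads: after the allreduce, each coalesced gradient buffer is divided by the number of participating ranks, with nranks taken from the global world size when no communication group is supplied. The patch drops the module-level `import paddle.distributed as dist` alias and spells out the full path at the call site. Below is a minimal standalone sketch of that scaling logic; the helper name _scale_coalesced_grads is hypothetical, and the in-place scale_ call stands in for the traced elementwise_div op the real code uses.

# A minimal sketch, assuming only that paddle is installed. The helper
# name _scale_coalesced_grads is hypothetical; the real implementation
# lives in python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
# and performs the division via a traced elementwise_div op instead.
import paddle


def _scale_coalesced_grads(coalesced_grads, comm_group=None):
    # Mirror the patched call site: reference get_world_size() through
    # the full paddle.distributed path rather than a module-level
    # `dist` alias, which the patch removes from the imports.
    nranks = (paddle.distributed.get_world_size()
              if comm_group is None else comm_group.nranks)
    for coalesced_grad in coalesced_grads:
        # scale_ divides each buffer in place by nranks; equivalent in
        # effect to dividing by a div_factor tensor of the grad's dtype.
        coalesced_grad.scale_(1.0 / nranks)

When launched with multiple ranks, get_world_size() reflects the actual trainer count; in a single-process run it falls back to 1, so the division is a no-op.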