Unverified commit 147fbfe0, authored by sneaxiy, committed by GitHub

remove useless logs (#55729)

Parent 011f97bc
@@ -23,10 +23,8 @@ from paddle.distributed import fleet
 from ...utils.log_util import logger
 from ...utils.tensor_fusion_helper import fused_parameters

-g_shard_use_reduce = int(os.environ.get("FLAGS_shard_use_reduce", 0))
-logger.info(f"g_shard_use_reduce {g_shard_use_reduce}")
-g_shard_norm_align_dp = int(os.environ.get("FLAGS_shard_norm_align_dp", 1))
-logger.info(f"g_shard_norm_align_dp {g_shard_norm_align_dp}")
+g_shard_use_reduce = int(os.environ.get("FLAGS_shard_use_reduce", 1))
+g_shard_norm_align_dp = int(os.environ.get("FLAGS_shard_norm_align_dp", 0))

 if g_shard_norm_align_dp:
     assert (
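For context, both settings are plain environment variables parsed with int() at module import time, so this hunk also flips the effective defaults (use_reduce: 0 to 1, norm_align_dp: 1 to 0). A minimal standalone sketch, not part of this commit, showing the new parsing and how exporting the flags before the fleet modules are imported restores the old behavior:

    import os

    # Restore the pre-change defaults (0 and 1); this must run before
    # the fleet modules above are imported, since they read the
    # environment at import time.
    os.environ.setdefault("FLAGS_shard_use_reduce", "0")
    os.environ.setdefault("FLAGS_shard_norm_align_dp", "1")

    # The same parsing the modules perform, with the new defaults:
    g_shard_use_reduce = int(os.environ.get("FLAGS_shard_use_reduce", 1))
    g_shard_norm_align_dp = int(os.environ.get("FLAGS_shard_norm_align_dp", 0))
    print(g_shard_use_reduce, g_shard_norm_align_dp)  # -> 0 1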
@@ -39,8 +39,7 @@ from ...utils.mix_precision_utils import MixPrecisionOptimizer
 __all__ = []

-g_shard_norm_align_dp = int(os.environ.get("FLAGS_shard_norm_align_dp", 1))
-logger.info(f"g_shard_norm_align_dp {g_shard_norm_align_dp}")
+g_shard_norm_align_dp = int(os.environ.get("FLAGS_shard_norm_align_dp", 0))

 class HybridParallelClipGrad:
@@ -44,8 +44,7 @@ from paddle.distributed.fleet.utils.tensor_fusion_helper import (
 __all__ = []

-g_shard_use_reduce = int(os.environ.get("FLAGS_shard_use_reduce", 0))
-logger.info(f"g_shard_use_reduce {g_shard_use_reduce}")
+g_shard_use_reduce = int(os.environ.get("FLAGS_shard_use_reduce", 1))

 # assume only the first stage and last stage need data, and data consumption is ordered
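The removed logger.info calls are easy to reproduce from user code if the values are still wanted at startup. A minimal sketch, assuming the fleet logger is importable as paddle.distributed.fleet.utils.log_util (the absolute form of the relative "from ...utils.log_util import logger" imports above; the path is an assumption, not confirmed by this diff):

    import os
    from paddle.distributed.fleet.utils.log_util import logger  # assumed absolute path

    # Re-emit the logs this commit removed, using the new defaults.
    logger.info(f"g_shard_use_reduce {int(os.environ.get('FLAGS_shard_use_reduce', 1))}")
    logger.info(f"g_shard_norm_align_dp {int(os.environ.get('FLAGS_shard_norm_align_dp', 0))}")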