From 4465ba27ee718ee71bbe7d9368886b2a15e59274 Mon Sep 17 00:00:00 2001
From: Haohongxiang <86215757+haohongxiang@users.noreply.github.com>
Date: Fri, 11 Nov 2022 10:44:56 +0800
Subject: [PATCH] rename fw_bw func name of interleave pp (#47571) (#47862)

---
 .../distributed/fleet/meta_parallel/pipeline_parallel.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
index b5c876dd3c..7757040733 100755
--- a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
+++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
@@ -537,7 +537,7 @@ class PipelineParallelWithInterleave(PipelineParallel):
 
         return input_tensor_grad
 
-    def interleave_pipeline(
+    def forward_backward_pipeline(
         self, data, scaler, forward_only=False, compute_loss=True
     ):
         # use interleave scheduling strategy.
@@ -766,7 +766,7 @@ class PipelineParallelWithInterleave(PipelineParallel):
     def train_batch(self, data, optimizer, lr_scheduler=None, scaler=None):
         data = self._prepare_training(data, optimizer, lr_scheduler)
         # interleave scheduler for pipeline parallel
-        train_loss = self.interleave_pipeline(data, scaler)
+        train_loss = self.forward_backward_pipeline(data, scaler)
 
         # optimizer
         with paddle.amp.auto_cast(enable=False):
@@ -781,4 +781,4 @@ class PipelineParallelWithInterleave(PipelineParallel):
         self._layers.eval()
         self._compute_loss = compute_loss
 
-        return self.interleave_pipeline(data, None, forward_only=True)
+        return self.forward_backward_pipeline(data, None, forward_only=True)
--
GitLab
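
Note: this patch only renames the internal scheduler entry point; the public train_batch() and eval_batch() methods keep their signatures and now delegate to forward_backward_pipeline(). Below is a minimal usage sketch, assuming fleet has already been initialized with a hybrid-parallel strategy that enables interleaved pipeline scheduling; the names net, opt, and batch are hypothetical placeholders and are not part of this patch.

    # Minimal sketch: user code goes through train_batch()/eval_batch(),
    # so only callers that invoked interleave_pipeline() directly need to
    # switch to forward_backward_pipeline() after this change.
    import paddle.distributed.fleet as fleet

    fleet.init(is_collective=True)                # strategy configuration omitted (assumed done elsewhere)
    model = fleet.distributed_model(net)          # 'net' is an assumed user-defined PipelineLayer
    optimizer = fleet.distributed_optimizer(opt)  # 'opt' is an assumed user-defined optimizer

    # train_batch() internally calls forward_backward_pipeline(data, scaler)
    train_loss = model.train_batch(batch, optimizer, lr_scheduler=None, scaler=None)

    # eval_batch() internally calls forward_backward_pipeline(data, None, forward_only=True)
    eval_loss = model.eval_batch(batch, compute_loss=True)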