From d3b98f0d84a38b2bfd541a6967f61634e63a918d Mon Sep 17 00:00:00 2001
From: LielinJiang <50691816+LielinJiang@users.noreply.github.com>
Date: Sat, 10 Oct 2020 10:57:34 +0800
Subject: [PATCH] Fix dynamic parallel train mode for hapi (#27787)

* fix dynamic parallel for hapi

* fix code style
---
 python/paddle/hapi/model.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 8505544a71f..459d6cd3284 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -638,19 +638,14 @@ class DynamicGraphAdapter(object):
 
         if self._nranks > 1:
             outputs = self.ddp_model.forward(* [to_variable(x) for x in inputs])
-            losses = self.model._loss(*(to_list(outputs) + labels))
-            losses = to_list(losses)
-            final_loss = fluid.layers.sum(losses)
-            final_loss = self.ddp_model.scale_loss(final_loss)
-            final_loss.backward()
-            self.ddp_model.apply_collective_grads()
         else:
             outputs = self.model.network.forward(
                 * [to_variable(x) for x in inputs])
-            losses = self.model._loss(*(to_list(outputs) + labels))
-            losses = to_list(losses)
-            final_loss = fluid.layers.sum(losses)
-            final_loss.backward()
+
+        losses = self.model._loss(*(to_list(outputs) + labels))
+        losses = to_list(losses)
+        final_loss = fluid.layers.sum(losses)
+        final_loss.backward()
 
         self.model._optimizer.minimize(final_loss)
         self.model.network.clear_gradients()
-- 
GitLab
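
Note (not part of the patch): the hunk above removes the per-branch loss/backward
code, including the explicit scale_loss() and apply_collective_grads() calls in the
data-parallel branch, and computes the loss and backward pass once for both the
self._nranks > 1 and the single-card case. Below is a minimal user-level sketch of
the flow that exercises the fixed DynamicGraphAdapter.train_batch path; the network,
dataset, and hyper-parameters are illustrative assumptions, and only paddle.Model,
prepare(), and fit() mirror the hapi API touched by this fix. To hit the
data-parallel branch, launch the script with multiple GPUs, e.g.
python -m paddle.distributed.launch train_hapi.py.

    import numpy as np
    import paddle
    from paddle.io import Dataset


    class RandomDataset(Dataset):
        """Synthetic 10-class classification data, purely for illustration."""

        def __init__(self, num_samples=256, feature_dim=16):
            self.x = np.random.randn(num_samples, feature_dim).astype('float32')
            self.y = np.random.randint(0, 10, (num_samples, 1)).astype('int64')

        def __getitem__(self, idx):
            return self.x[idx], self.y[idx]

        def __len__(self):
            return len(self.x)


    def main():
        # paddle.Model wraps the network; when the script runs under
        # paddle.distributed.launch with more than one process, hapi's
        # DynamicGraphAdapter.train_batch takes the self._nranks > 1
        # branch repaired by this patch.
        net = paddle.nn.Sequential(
            paddle.nn.Linear(16, 64),
            paddle.nn.ReLU(),
            paddle.nn.Linear(64, 10))
        model = paddle.Model(net)
        optim = paddle.optimizer.Adam(
            learning_rate=1e-3, parameters=model.parameters())
        model.prepare(optim, paddle.nn.CrossEntropyLoss())
        model.fit(RandomDataset(), epochs=1, batch_size=32)


    if __name__ == '__main__':
        main()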