From c4f8afa2582031c578f852439bd2b5d6864668c1 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Wed, 8 Aug 2018 13:48:02 +0000
Subject: [PATCH] Fix bug when memopt optimize loss.grad and use ParallelExecutor

---
 .../details/multi_devices_graph_builder.cc | 14 ++++++++------
 .../details/multi_devices_graph_builder.h  |  4 +++-
 paddle/fluid/framework/operator.cc         |  6 ++++--
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index 5ca2ed8f96..a4fdbcb26d 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -275,7 +275,8 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
       if (strategy_.gradient_scale_ !=
           BuildStrategy::GradientScaleStrategy::kCustomized) {
         // TODO(paddle-dev): Why is there no input for this op_handle?
-        CreateScaleLossGradOp(&result);
+        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
+        CreateScaleLossGradOp(&result, loss_grad_name);
       }
       // This assumes the backward generating code will ensure IsScaleLossOp
       // is true only for the op that scale the final scalar loss.
@@ -535,7 +536,8 @@ int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
   return got == sharded_var_device.end() ? -1 : got->second;
 }
 
-void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(ir::Graph *result) const {
+void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
+    ir::Graph *result, const std::string &loss_grad_name) const {
   for (size_t i = 0; i < places_.size(); ++i) {
 // Insert ScaleCost OpHandle
 #ifdef PADDLE_WITH_CUDA
@@ -558,10 +560,10 @@ void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(ir::Graph *result) const {
     // loss->pending_ops_.emplace_back(op_handle);
     // op_handle->inputs_.emplace_back(loss);
 
-    CreateOpOutput(result, op_handle,
-                   result->CreateEmptyNode(GradVarName(loss_var_name_),
-                                           ir::Node::Type::kVariable),
-                   places_[i], i);
+    CreateOpOutput(
+        result, op_handle,
+        result->CreateEmptyNode(loss_grad_name, ir::Node::Type::kVariable),
+        places_[i], i);
   }
 }
 
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h
index 099dbe5abe..f2cb6bb1c8 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.h
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h
@@ -75,7 +75,9 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
   void CreateComputationalOps(ir::Graph *result, ir::Node *node,
                               size_t num_places) const;
 
-  void CreateScaleLossGradOp(ir::Graph *result) const;
+  void CreateScaleLossGradOp(ir::Graph *result,
+                             const std::string &loss_grad_name) const;
+
   VarHandle *CreateReduceOp(ir::Graph *result, const std::string &og,
                             int dst_dev_id) const;
   void CreateComputationalOp(ir::Graph *result, ir::Node *node,
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 0c8acf71bf..16c7f819f3 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -778,6 +778,7 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType(
     const ExecutionContext& ctx) const {
   auto& scope = ctx.scope();
   int data_type = -1;
+  std::string last_input_name;
   for (auto& input : this->inputs_) {
     for (auto& ipt_name : input.second) {
       auto* var = scope.FindVar(ipt_name);
@@ -794,9 +795,10 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType(
           int tmp = static_cast<int>(ToDataType(t->type()));
           PADDLE_ENFORCE(
               tmp == data_type || data_type == -1,
-              "DataType of Paddle Op %s must be the same. Get %d != %d", Type(),
-              data_type, tmp);
+              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
+              Type(), last_input_name, data_type, ipt_name, tmp);
           data_type = tmp;
+          last_input_name = ipt_name;
         }
       }
     }
--
GitLab
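
Note (not part of the patch): the core fix is that the scale-loss-grad op now takes its output variable name from the backward op that produced it, rather than always deriving it as GradVarName(loss_var_name_), i.e. loss_var_name_ + "@GRAD". When the memory-optimization pass reuses or renames the loss gradient variable, that derived name no longer matches the variable actually present in the graph, which appears to be the ParallelExecutor breakage named in the subject. The standalone C++ sketch below only illustrates that naming difference; the variable names and the stand-in vector are hypothetical, not Paddle APIs.

```cpp
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Old behaviour: derive the gradient variable name from the loss variable,
  // roughly what GradVarName(loss_var_name_) produced ("loss" -> "loss@GRAD").
  const std::string loss_var_name = "loss";
  const std::string old_grad_name = loss_var_name + "@GRAD";

  // New behaviour: read the name from the scale-loss op's first output
  // argument. The vector stands in for node->Op()->OutputArgumentNames();
  // after memory optimization it may hold a reused/renamed variable
  // (name made up here) instead of "loss@GRAD".
  const std::vector<std::string> output_arg_names = {"fc_0.tmp_1"};
  const std::string new_grad_name = output_arg_names.front();

  std::cout << "derived name: " << old_grad_name << "\n"
            << "actual output name: " << new_grad_name << std::endl;
  return 0;
}
```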