diff --git a/paddle/fluid/eager/backward.cc b/paddle/fluid/eager/backward.cc
index ebd3333c5265990a8ae2fb6840113bd0ea4d4766..0e9dc19c2e310e75e32d52d011a65630ea5b967d 100644
--- a/paddle/fluid/eager/backward.cc
+++ b/paddle/fluid/eager/backward.cc
@@ -612,7 +612,9 @@ std::vector<paddle::experimental::Tensor> RunBackward(
     for (size_t i = 0; i < edges.size(); i++) {
       for (size_t j = 0; j < edges[i].size(); j++) {
         const Edge& edge = edges[i][j];
-
+        if (!edge.IsInitialized()) {
+          continue;
+        }
         auto edge_rank = edge.GetEdgeRankInfo();
         // Since we make edge has as same rank as bwd outputs, we indexing them
         // with
diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc
index 891ad4d8983b5b37b31ab5f5f980e74ccff47069..1d44d842b0825aa96380c947c67082fbcb5e1642 100644
--- a/paddle/fluid/eager/grad_node_info.cc
+++ b/paddle/fluid/eager/grad_node_info.cc
@@ -63,6 +63,8 @@ void GradNodeBase::AddEdges(std::vector<AutogradMeta*>* metas, size_t slot_id) {
       adj_edges_[slot_id].emplace_back(meta->GetMutableGradNode(),
                                        meta->OutRankInfo());
+    } else {
+      adj_edges_[slot_id].emplace_back();
     }
   }
 }
 
@@ -85,6 +87,8 @@ void GradNodeBase::AddEdges(AutogradMeta* meta, size_t slot_id) {
     adj_edges_[slot_id].emplace_back(meta->GetMutableGradNode(),
                                      meta->OutRankInfo());
+  } else {
+    adj_edges_[slot_id].emplace_back();
   }
 }
 
diff --git a/paddle/fluid/eager/grad_node_info.h b/paddle/fluid/eager/grad_node_info.h
index 4b21a193ee021f06538e1a11bbffb898376739a7..28c12717a24b0c89b8a3b6544124ad6533d6c70d 100644
--- a/paddle/fluid/eager/grad_node_info.h
+++ b/paddle/fluid/eager/grad_node_info.h
@@ -257,12 +257,22 @@ class Edge {
   }
 
   // Currently we use grad_node_ to identify if a edge is initialized.
-  bool IsInitialized() const { return grad_node_.get(); }
+  bool IsInitialized() const {
+    if (!grad_node_) {
+      return false;
+    } else {
+      if (!(grad_node_.get())) {
+        return false;
+      } else {
+        return true;
+      }
+    }
+  }
 
  private:
   size_t in_slot_id_;
   size_t in_rank_;
-  std::shared_ptr<GradNodeBase> grad_node_;
+  std::shared_ptr<GradNodeBase> grad_node_{nullptr};
 };
 
 }  // namespace egr
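
For readers of this patch: the three changes work together. `AddEdges` now emplaces a default-constructed placeholder `Edge` whenever an input carries no grad node, so the edges in each slot stay rank-aligned with the backward outputs, and the new guard in `RunBackward` skips those placeholders via `Edge::IsInitialized()`. Below is a minimal, self-contained sketch of that pattern; the class and member names are reused from the diff, while everything else (the stand-in `GradNodeBase`, the `main` driver) is illustrative only and not Paddle's actual API.

// Sketch only: demonstrates why default-constructed "placeholder" edges
// must be skipped during the backward pass rather than dereferenced.
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Stand-in for egr::GradNodeBase; illustrative only.
struct GradNodeBase {};

class Edge {
 public:
  // Default-constructed placeholder: grad_node_ stays nullptr, which is
  // exactly what the new `else` branch in AddEdges emplaces.
  Edge() = default;
  explicit Edge(std::shared_ptr<GradNodeBase> node)
      : grad_node_(std::move(node)) {}

  // An edge counts as initialized only if it actually holds a grad node.
  bool IsInitialized() const { return grad_node_ != nullptr; }

 private:
  std::shared_ptr<GradNodeBase> grad_node_{nullptr};
};

int main() {
  // One slot holding a real edge and a placeholder, mimicking a node whose
  // second input has no grad node attached.
  std::vector<Edge> slot;
  slot.emplace_back(std::make_shared<GradNodeBase>());
  slot.emplace_back();  // placeholder keeps the edge rank aligned

  for (const Edge& edge : slot) {
    if (!edge.IsInitialized()) {
      continue;  // mirrors the guard added in RunBackward
    }
    std::cout << "visiting an initialized edge\n";
  }
  return 0;
}

One note on the header change: `std::shared_ptr::operator bool` is defined as `get() != nullptr`, so the nested if/else in the patched `IsInitialized()` is behaviorally identical to the original `return grad_node_.get();` (modulo making the bool conversion explicit); the sketch above collapses it to a single comparison.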