diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/details/all_reduce_deps_pass.cc
index fe21e21bcfc42bfb3251a7d0d15aa5926f56813f..b7d6edd389d8e40835dadf56d7c54d53402f6f4d 100644
--- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc
+++ b/paddle/fluid/framework/details/all_reduce_deps_pass.cc
@@ -82,13 +82,13 @@ std::unique_ptr<ir::Graph> AllReduceDepsPass::ApplyImpl(
                 PADDLE_ENFORCE(i0 != nullptr && i1 != nullptr,
                                "%s convert to %s error", op1->DebugString(),
                                op2->DebugString());
-                auto l_it = vars.find(i0->name_);
-                auto r_it = vars.find(i1->name_);
+                auto l_it = vars.find(i0->name());
+                auto r_it = vars.find(i1->name());
 
                 if (l_it->second < r_it->second) return true;
 
                 if (l_it->second == r_it->second) {
-                  return i0->name_ < i1->name_;
+                  return i0->name() < i1->name();
                 }
 
                 return false;
diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index a24e3d3e487e488f0d0c59809a0adc9f9524cc6e..dd77f7099f581a5b825916c4ea010023f3ad5bcd 100644
--- a/paddle/fluid/framework/details/all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -70,9 +70,9 @@ void AllReduceOpHandle::RunImpl() {
       auto *s = local_scopes_[i];
       auto &local_scope = *s->FindVar(kLocalExecScopeName)->Get<Scope *>();
       auto &lod_tensor =
-          local_scope.FindVar(in_var_handles[i]->name_)->Get<LoDTensor>();
+          local_scope.FindVar(in_var_handles[i]->name())->Get<LoDTensor>();
       lod_tensors.emplace_back(&lod_tensor);
-      PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_,
+      PADDLE_ENFORCE_EQ(in_var_handles[i]->name(), out_var_handles[i]->name(),
                         "The name of input and output should be equal.");
     }
 
@@ -134,7 +134,7 @@ void AllReduceOpHandle::RunImpl() {
     auto &trg = *this->local_scopes_[0]
                      ->FindVar(kLocalExecScopeName)
                      ->Get<Scope *>()
-                     ->FindVar(out_var_handles[0]->name_)
+                     ->FindVar(out_var_handles[0]->name())
                      ->GetMutable<framework::LoDTensor>();
 
     // Reduce All Tensor to trg in CPU
@@ -145,7 +145,7 @@ void AllReduceOpHandle::RunImpl() {
       auto &scope =
           *local_scopes_[i]->FindVar(kLocalExecScopeName)->Get<Scope *>();
       auto &p = places_[i];
-      auto *var = scope.FindVar(out_var_handles[i]->name_);
+      auto *var = scope.FindVar(out_var_handles[i]->name());
       auto *dev_ctx = dev_ctxes_.at(p);
 
       RunAndRecordEvent(p, [&trg, var, dev_ctx, p] {
diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc
index cf280c29ff8c7416be3b2d0b529bd04776150950..89d626edddfee3d2c43a3cf2232ad4fc1611e655 100644
--- a/paddle/fluid/framework/details/broadcast_op_handle.cc
+++ b/paddle/fluid/framework/details/broadcast_op_handle.cc
@@ -56,11 +56,11 @@ void BroadcastOpHandle::BroadcastOneVar(
     const std::vector<VarHandle *> &out_var_handles,
     const std::vector<const Scope *> &var_scopes) {
   auto *in_var =
-      var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_);
+      var_scopes.at(in_var_handle.scope_idx())->FindVar(in_var_handle.name());
   PADDLE_ENFORCE_NOT_NULL(in_var);
   Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);
   if (UNLIKELY(!in_tensor.IsInitialized())) {
-    VLOG(3) << "in var " << in_var_handle.name_ << "not inited, return!";
+    VLOG(3) << "in var " << in_var_handle.name() << "not inited, return!";
     return;
   }
 
@@ -71,9 +71,9 @@ void BroadcastOpHandle::BroadcastOneVar(
       if (out_var_handle->IsTheSameVar(in_var_handle)) {
         continue;
       }
-      auto &out_p = out_var_handle->place_;
-      auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
-                          ->FindVar(out_var_handle->name_);
+      auto &out_p = out_var_handle->place();
+      auto *out_var = var_scopes.at(out_var_handle->scope_idx())
+                          ->FindVar(out_var_handle->name());
 
       RunAndRecordEvent(out_p, [in_tensor, out_var] {
         paddle::framework::TensorCopy(
@@ -91,11 +91,11 @@ void BroadcastOpHandle::BroadcastOneVar(
     size_t numel = static_cast<size_t>(in_tensor.numel());
 
     for (auto out_var_handle : out_var_handles) {
-      Variable *out_var = var_scopes.at(out_var_handle->scope_idx_)
-                              ->FindVar(out_var_handle->name_);
+      Variable *out_var = var_scopes.at(out_var_handle->scope_idx())
+                              ->FindVar(out_var_handle->name());
 
       int dst_id =
-          boost::get<platform::CUDAPlace>(out_var_handle->place_).device;
+          boost::get<platform::CUDAPlace>(out_var_handle->place()).device;
 
       auto &nccl_ctx = nccl_ctxs_->at(dst_id);
 
@@ -106,7 +106,7 @@ void BroadcastOpHandle::BroadcastOneVar(
       } else {
         send_recv_buffer = VariableVisitor::GetMutableTensor(out_var)
                                .Resize(in_tensor.dims())
-                               .mutable_data(out_var_handle->place_);
+                               .mutable_data(out_var_handle->place());
       }
 
       broadcast_calls.emplace_back(
@@ -126,11 +126,11 @@ void BroadcastOpHandle::BroadcastOneVar(
       }
 
       if (!out_handle->IsTheSameVar(in_var_handle)) {
-        auto out_var = var_scopes.at(in_var_handle.scope_idx_)
-                           ->FindVar(out_var_handles[0]->name_);
+        auto out_var = var_scopes.at(in_var_handle.scope_idx())
+                           ->FindVar(out_var_handles[0]->name());
         paddle::framework::TensorCopy(
-            in_tensor, in_var_handle.place_,
-            *(dev_ctxes_.at(in_var_handle.place_)),
+            in_tensor, in_var_handle.place(),
+            *(dev_ctxes_.at(in_var_handle.place())),
             &VariableVisitor::GetMutableTensor(out_var));
       }
     });
@@ -148,7 +148,7 @@ void BroadcastOpHandle::InitOutputValue(
     var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>());
   }
   auto *in_var =
-      var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_);
+      var_scopes.at(in_var_handle.scope_idx())->FindVar(in_var_handle.name());
 
   Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);
 
@@ -158,9 +158,9 @@ void BroadcastOpHandle::InitOutputValue(
     if (out_var_handle->IsTheSameVar(in_var_handle)) {
       continue;
     }
-    auto t_out_p = out_var_handle->place_;
-    auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
-                        ->FindVar(out_var_handle->name_);
+    auto t_out_p = out_var_handle->place();
+    auto *out_var = var_scopes.at(out_var_handle->scope_idx())
+                        ->FindVar(out_var_handle->name());
     PADDLE_ENFORCE_NOT_NULL(out_var);
     if (is_gpu_place(in_tensor.place())) {
       PADDLE_ENFORCE(platform::is_gpu_place(t_out_p),
diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc
index cc562c7b102cea80e18cbd2c054c34415a7442c9..48dcc52623369f7b0f51cd8c8aeb198b37467d5f 100644
--- a/paddle/fluid/framework/details/data_balance_op_handle.cc
+++ b/paddle/fluid/framework/details/data_balance_op_handle.cc
@@ -100,13 +100,13 @@ void DataBalanceOpHandle::RunImpl() {
   std::vector<std::vector<LoDTensor *>> lod_tensors(data_num);
   std::vector<int> device_sizes;
   for (int i = 0; i < static_cast<int>(in_var_handles.size()); ++i) {
-    PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_,
+    PADDLE_ENFORCE_EQ(in_var_handles[i]->name(), out_var_handles[i]->name(),
                       "The name of input and output should be equal.");
     int place_idx = i / data_num;
     int data_idx = i % data_num;
     auto *local_scope =
         local_scopes_[place_idx]->FindVar(kLocalExecScopeName)->Get<Scope *>();
-    auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name_);
+    auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name());
     PADDLE_ENFORCE(tensor_var->IsType<LoDTensor>());
     auto *tensor = tensor_var->GetMutable<LoDTensor>();
     lod_tensors[data_idx].push_back(tensor);
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index 648adae06facb504042d8286f6eab5d98e99c015..bbf81e1b8e49cae133858f7aa121701fb0f5456f 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -52,12 +52,12 @@ void FetchOpHandle::RunImpl() {
 
   for (size_t i = 0; i < inputs_.size(); ++i) {
     auto *var_handle = static_cast<VarHandle *>(inputs_[i]);
-    auto &scope = scopes.at(var_handle->scope_idx_);
+    auto &scope = scopes.at(var_handle->scope_idx());
     auto *var = scope->FindVar(kLocalExecScopeName)
                     ->Get<Scope *>()
-                    ->FindVar(var_handle->name_);
+                    ->FindVar(var_handle->name());
     PADDLE_ENFORCE_NOT_NULL(var, "Cannot find variable %s in execution scope",
-                            var_handle->name_);
+                            var_handle->name());
 
     auto &t = var->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(t.place())) {
diff --git a/paddle/fluid/framework/details/fuse_vars_op_handle.cc b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
index 018c9bff71e553d8a3641f06f10b350453676b24..d65b0920698748e8a2ded728d78fbcd69b7bae0e 100644
--- a/paddle/fluid/framework/details/fuse_vars_op_handle.cc
+++ b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
@@ -29,14 +29,14 @@ void FuseVarsOpHandle::RunImpl() {
   auto scope = local_scope_->FindVar(kLocalExecScopeName)->Get<Scope *>();
 
   auto out_var_handle = out_var_handles[0];
-  auto out_var = scope->Var(out_var_handle->name_);
+  auto out_var = scope->Var(out_var_handle->name());
 
   auto out_tensor = out_var->GetMutable<LoDTensor>();
   out_tensor->Resize({total_numel_}).mutable_data(this->place_, type_);
 
   int64_t s = 0;
   for (size_t i = 1; i < out_var_handles.size(); ++i) {
-    auto out_name = out_var_handles[i]->name_;
+    auto out_name = out_var_handles[i]->name();
     auto out_t = scope->Var(out_name)->GetMutable<LoDTensor>();
     auto numel = this->inputs_numel_.at(out_name);
     out_t->ShareDataWith(out_tensor->Slice(s, s + numel));
diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc
index ca4633c5a8f22fc9f7319b06aa766f9fe37dc68c..179cca44cb1871bb9667074f6c6b32edee42be09 100644
--- a/paddle/fluid/framework/details/gather_op_handle.cc
+++ b/paddle/fluid/framework/details/gather_op_handle.cc
@@ -49,7 +49,7 @@ void GatherOpHandle::RunImpl() {
   auto in_0_handle = in_var_handles[0];
 
   auto pre_in_var =
-      var_scopes.at(in_0_handle->scope_idx_)->FindVar(in_0_handle->name_);
+      var_scopes.at(in_0_handle->scope_idx())->FindVar(in_0_handle->name());
   PADDLE_ENFORCE_NOT_NULL(pre_in_var);
 
   PADDLE_ENFORCE(pre_in_var->IsType<framework::SelectedRows>(),
@@ -65,7 +65,7 @@ void GatherOpHandle::RunImpl() {
   // Gather the inputs
   for (auto *in_handle : in_var_handles) {
     auto *in_var =
-        var_scopes.at(in_handle->scope_idx_)->FindVar(in_handle->name_);
+        var_scopes.at(in_handle->scope_idx())->FindVar(in_handle->name());
     PADDLE_ENFORCE_NOT_NULL(in_var);
     VariableVisitor::EnforceShapeAndDTypeEQ(*in_var, *pre_in_var);
 
@@ -77,7 +77,7 @@ void GatherOpHandle::RunImpl() {
   }
 
   // NOTE: The Places of all input tensor must be all on CPU or all on GPU.
-  platform::Place t_out_p = out_var_handle->place_;
+  platform::Place t_out_p = out_var_handle->place();
   if (platform::is_gpu_place(pre_in_value.place())) {
     PADDLE_ENFORCE(platform::is_gpu_place(t_out_p),
                    "Places of input and output must be all on GPU.");
@@ -85,8 +85,8 @@ void GatherOpHandle::RunImpl() {
     t_out_p = platform::CPUPlace();
   }
 
-  auto out_var =
-      var_scopes.at(out_var_handle->scope_idx_)->FindVar(out_var_handle->name_);
+  auto out_var = var_scopes.at(out_var_handle->scope_idx())
+                     ->FindVar(out_var_handle->name());
   PADDLE_ENFORCE_NOT_NULL(out_var);
   auto out_value = out_var->GetMutable<framework::SelectedRows>();
   out_value->set_height(pre_in_value.height());
@@ -99,9 +99,9 @@ void GatherOpHandle::RunImpl() {
   Tensor *out_tensor = out_value->mutable_value();
 
   // copy
-  auto dev_ctx = dev_ctxes_.at(out_var_handle->place_);
-  RunAndRecordEvent(out_var_handle->place_, [in_tensors, out_tensor, &dev_ctx,
-                                             t_out_p] {
+  auto dev_ctx = dev_ctxes_.at(out_var_handle->place());
+  RunAndRecordEvent(out_var_handle->place(), [in_tensors, out_tensor, &dev_ctx,
+                                              t_out_p] {
     int s = 0, e = 0;
     for (size_t j = 0; j < in_tensors.size(); ++j) {
       e += in_tensors[j].dims()[0];
diff --git a/paddle/fluid/framework/details/memory_early_delete_pass.cc b/paddle/fluid/framework/details/memory_early_delete_pass.cc
index 06a2451c136e3243ba41661fa691f9a6ef8b52ac..5906b7d57ce122520a4594f1528e00982eaa1a7f 100644
--- a/paddle/fluid/framework/details/memory_early_delete_pass.cc
+++ b/paddle/fluid/framework/details/memory_early_delete_pass.cc
@@ -33,7 +33,7 @@ static ComputationOpHandle* FindNextComputationOpHandle(VarHandle* var_in) {
     queue.pop();
     for (auto* op : var->PendingOps()) {
      auto* compute_op = dynamic_cast<ComputationOpHandle*>(op);
-      if (compute_op != nullptr && compute_op->GetPlace() == var_in->place_) {
+      if (compute_op != nullptr && compute_op->GetPlace() == var_in->place()) {
        return compute_op;
      }
      for (auto* out_var : op->Outputs()) {
@@ -64,7 +64,7 @@ std::unique_ptr<ir::Graph> MemoryEarlyDeletePass::ApplyImpl(
       for (auto& var : vars) {
         auto* var_handle = dynamic_cast<VarHandle*>(var);
         auto var_name = var->Node()->Name();
-        auto& var_place = var_handle->place_;
+        auto& var_place = var_handle->place();
         if (unlived_vars.count(var_name) == 0) continue;
         if (!unlived_vars[var_name].empty()) {
           if (compute_op != nullptr &&
diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
index c203073845375c879a0fc10564f5dad0f19ceae4..e82eb104fa9f461ec370fc4b31551dd1a9214a7c 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
@@ -52,11 +52,11 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph,
         vars[var_ptr] = cur_var_id;
 
         if (var_handle_ptr) {
-          sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name_
+          sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name()
                << "\\n"
-               << var_handle_ptr->place_ << "\\n"
-               << "scope: " << var_handle_ptr->scope_idx_ << "\\n"
-               << "v" << var_handle_ptr->version_ << "\"]" << std::endl;
+               << var_handle_ptr->place() << "\\n"
+               << "scope: " << var_handle_ptr->scope_idx() << "\\n"
+               << "v" << var_handle_ptr->version() << "\"]" << std::endl;
         } else if (dummy_ptr) {
           sout << "var_" << cur_var_id << " [label=\"dummy\"]" << std::endl;
         }
diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc
index 7a5f7de57ef20b4b909894ff8d742a65ea05874d..ee4c8a6ecf77e5d0f23f38b763917d926afdb07a 100644
--- a/paddle/fluid/framework/details/reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle.cc
@@ -60,8 +60,8 @@ void ReduceOpHandle::GatherSelectedRows(
       *CollectiveContext::GetInstance();
 
   // 1. gather local selected rows, merge them
-  std::string gathered_var_name = out_var_handle->name_ + "_gathered_tmp";
-  auto scope = local_scopes_.at(out_var_handle->scope_idx_);
+  std::string gathered_var_name = out_var_handle->name() + "_gathered_tmp";
+  auto scope = local_scopes_.at(out_var_handle->scope_idx());
   auto gathered_var_mid = scope->Var(gathered_var_name);
   auto gathered_select_rows =
       gathered_var_mid->GetMutable<framework::SelectedRows>();
@@ -73,7 +73,7 @@ void ReduceOpHandle::GatherSelectedRows(
   // merge them
   auto merged_dev_ctx = dynamic_cast<DevCtx *>(dev_ctxes.at(out_place));
   std::string merged_var_name =
-      GetRemoteVarName(out_var_handle->name_, collective_context.trainer_id_);
+      GetRemoteVarName(out_var_handle->name(), collective_context.trainer_id_);
   auto merged_select_rows =
       scope->Var(merged_var_name)->GetMutable<framework::SelectedRows>();
   operators::math::scatter::MergeAdd<DevCtx, DataType> merge_func;
@@ -101,7 +101,7 @@ void ReduceOpHandle::GatherSelectedRows(
 
     operators::distributed::RemoteVar var;
     var.trainer_id_ = i;
-    var.var_name_ = GetRemoteVarName(out_var_handle->name_, i);
+    var.var_name_ = GetRemoteVarName(out_var_handle->name(), i);
     var.ep_ = collective_context.endpoints_[i];
 
     vars.push_back(var);
@@ -166,7 +166,7 @@ void ReduceOpHandle::RunImpl() {
   }
 
   auto pre_in_var =
-      var_scopes.at(in_0_handle->scope_idx_)->FindVar(in_0_handle->name_);
+      var_scopes.at(in_0_handle->scope_idx())->FindVar(in_0_handle->name());
   PADDLE_ENFORCE_NOT_NULL(pre_in_var);
 
   // Wait input done, this Wait is asynchronous operation
@@ -175,15 +175,15 @@ void ReduceOpHandle::RunImpl() {
   // NOTE: The Places of all input tensor must be all on CPU or all on GPU.
   std::vector<platform::Place> in_places;  // used to get dev_ctx
   for (auto *in_handle : in_var_handles) {
-    in_places.emplace_back(in_handle->place_);
+    in_places.emplace_back(in_handle->place());
     auto in_var =
-        var_scopes.at(in_handle->scope_idx_)->FindVar(in_handle->name_);
+        var_scopes.at(in_handle->scope_idx())->FindVar(in_handle->name());
     PADDLE_ENFORCE_NOT_NULL(in_var);
     VariableVisitor::EnforceShapeAndDTypeEQ(*pre_in_var, *in_var);
   }
 
-  auto out_var =
-      var_scopes.at(out_var_handle->scope_idx_)->FindVar(out_var_handle->name_);
+  auto out_var = var_scopes.at(out_var_handle->scope_idx())
+                     ->FindVar(out_var_handle->name());
   PADDLE_ENFORCE_NOT_NULL(out_var);
 
   // NOTE: The tensors' Place of input and output must be all on GPU or all on
@@ -191,9 +191,9 @@ void ReduceOpHandle::RunImpl() {
   auto in_p = VariableVisitor::GetMutableTensor(pre_in_var).place();
   platform::Place t_out_p;
   if (platform::is_gpu_place(in_p)) {
-    PADDLE_ENFORCE(platform::is_gpu_place(out_var_handle->place_),
+    PADDLE_ENFORCE(platform::is_gpu_place(out_var_handle->place()),
                    "Places of input and output must be all on GPU.");
-    t_out_p = out_var_handle->place_;
+    t_out_p = out_var_handle->place();
   } else {
     t_out_p = platform::CPUPlace();
   }
@@ -253,7 +253,7 @@ void ReduceOpHandle::RunImpl() {
       auto &reduce_sum_trg = *this->local_scopes_[0]
                                   ->FindVar(kLocalExecScopeName)
                                   ->Get<Scope *>()
-                                  ->FindVar(out_var_handle->name_)
+                                  ->FindVar(out_var_handle->name())
                                   ->GetMutable<framework::LoDTensor>();
       ReduceLoDTensor func(lod_tensors, &reduce_sum_trg);
       VisitDataType(lod_tensors[0]->type(), func);
@@ -269,9 +269,9 @@ void ReduceOpHandle::RunImpl() {
     auto pre_in = pre_in_var->Get<framework::LoDTensor>();
     VariableVisitor::ShareDimsAndLoD(*pre_in_var, out_var);
     VariableVisitor::GetMutableTensor(out_var).mutable_data(
-        out_var_handle->place_, pre_in.type());
+        out_var_handle->place(), pre_in.type());
 
-    auto out_p = out_var_handle->place_;
+    auto out_p = out_var_handle->place();
     int root_id = boost::get<platform::CUDAPlace>(out_p).device;
     std::vector<std::function<void()>> all_reduce_calls;
     for (size_t i = 0; i < var_scopes.size(); ++i) {
@@ -286,7 +286,7 @@ void ReduceOpHandle::RunImpl() {
 
         if (root_id == dev_id) {
           recvbuffer = out_var->GetMutable<framework::LoDTensor>()->mutable_data(
-              out_var_handle->place_);
+              out_var_handle->place());
         }
 
         int type = platform::ToNCCLDataType(lod_tensor.type());
@@ -320,8 +320,8 @@ std::vector<const T *> ReduceOpHandle::GetInputValues(
     const std::vector<const Scope *> &var_scopes) const {
   std::vector<const T *> in_selected_rows;
   for (auto *in_handle : in_var_handles) {
-    auto &in_sr = var_scopes.at(in_handle->scope_idx_)
-                      ->FindVar(in_handle->name_)
+    auto &in_sr = var_scopes.at(in_handle->scope_idx())
+                      ->FindVar(in_handle->name())
                       ->Get<T>();
     in_selected_rows.emplace_back(&in_sr);
   }
diff --git a/paddle/fluid/framework/details/rpc_op_handle.cc b/paddle/fluid/framework/details/rpc_op_handle.cc
index dfa6c1ade1a024bb9087144d0e96fa5b0417f06a..3e082f247adf7fe22db2b62802f0a87c9c93447a 100644
--- a/paddle/fluid/framework/details/rpc_op_handle.cc
+++ b/paddle/fluid/framework/details/rpc_op_handle.cc
@@ -30,7 +30,7 @@ RPCOpHandle::RPCOpHandle(ir::Node *node, const framework::OpDesc &op_desc,
 
 void RPCOpHandle::RunImpl() {
   for (auto *in : inputs_) {
-    auto &p = static_cast<VarHandle *>(in)->place_;
+    auto &p = static_cast<VarHandle *>(in)->place();
     if (ir::IsControlDepVar(*in->Node())) {
       continue;
     }
diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
index e1b8e8fe05f0615d689e78d9c405cc5d76d2abb1..6924549f36d6365534ab288257899a78107675cc 100644
--- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
+++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -68,7 +68,7 @@ struct ScaleLossGradFunctor {
 
 void ScaleLossGradOpHandle::RunImpl() {
   // Doesn't wait any event
-  std::string var_name = static_cast<VarHandle *>(this->outputs_[0])->name_;
+  std::string var_name = static_cast<VarHandle *>(this->outputs_[0])->name();
   auto &local_scope = *scope_->FindVar(kLocalExecScopeName)->Get<Scope *>();
 
   auto *tensor = local_scope.FindVar(var_name)->GetMutable<LoDTensor>();
diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h
index 3b007d7b1a52df765a2dbd41939f8f865123cb43..8321c32f8b1d73bf5e6080b4b314abc9fd20536d 100644
--- a/paddle/fluid/framework/details/var_handle.h
+++ b/paddle/fluid/framework/details/var_handle.h
@@ -111,15 +111,22 @@ struct VarHandle : public VarHandleBase {
 
   // version field currently is not used, however, just store the version to
   // debug easily.
+ private:
   size_t version_;
   size_t scope_idx_;
   std::string name_;
   platform::Place place_;
 
+ public:
   bool IsTheSameVar(const VarHandle& o) const {
     return o.generated_op_ == generated_op_ && o.name_ == name_ &&
           o.scope_idx_ == scope_idx_;
   }
+
+  size_t version() const { return version_; }
+  size_t scope_idx() const { return scope_idx_; }
+  const std::string& name() const { return name_; }
+  const platform::Place& place() const { return place_; }
 };
 
 // Dummy Variable. It is used to represent dependencies between operators
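
Note: the var_handle.h hunk above is the core of this change. The VarHandle fields become private, and every call site in the other files switches from direct member access (->name_, ->scope_idx_, ->place_, ->version_) to the new read-only accessors. The snippet below is a minimal, self-contained sketch of that encapsulation pattern; the Handle class and main function are hypothetical illustrations, not Paddle code, and only mirror the accessor names introduced in this diff.

#include <iostream>
#include <string>
#include <utility>

// Stand-in for VarHandle after the refactor: the data is private, so only
// the class itself can mutate it; callers read through const accessors.
class Handle {
 private:
  size_t version_{0};
  size_t scope_idx_{0};
  std::string name_;

 public:
  Handle(size_t version, size_t scope_idx, std::string name)
      : version_(version), scope_idx_(scope_idx), name_(std::move(name)) {}

  // Read-only accessors, mirroring VarHandle::version()/scope_idx()/name().
  size_t version() const { return version_; }
  size_t scope_idx() const { return scope_idx_; }
  const std::string& name() const { return name_; }
};

int main() {
  Handle h(1, 0, "fc_0.w_0");
  // Call sites now read through accessors instead of touching members:
  std::cout << h.name() << " scope: " << h.scope_idx() << " v" << h.version()
            << std::endl;
  // h.name_ = "other";  // would no longer compile: name_ is private
  return 0;
}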