From ae3a9c48ed8219efc44848fde55f5892caf82423 Mon Sep 17 00:00:00 2001
From: Yi Wang
Date: Sun, 30 Jul 2017 15:43:54 -0700
Subject: [PATCH] Add throw_on_error specialization for T*; Update all usages
 of Scope

---
 doc/design/scope.md                           | 18 ++---
 paddle/operators/recurrent_network_op.cc      | 76 +++++++++----------
 paddle/operators/recurrent_network_op.h       |  4 +-
 paddle/operators/recurrent_network_op_test.cc | 70 ++++++++---------
 paddle/platform/enforce.h                     |  5 ++
 paddle/pybind/pybind.cc                       | 10 +--
 6 files changed, 85 insertions(+), 98 deletions(-)

diff --git a/doc/design/scope.md b/doc/design/scope.md
index afe6bc028ca..c9e0be716b6 100644
--- a/doc/design/scope.md
+++ b/doc/design/scope.md
@@ -37,8 +37,8 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
 ```cpp
 class Scope {
  public:
-  Variable* CreateVariable(const std::string& name);
-  const Variable* GetVariable(const std::string& name) const;
+  Variable* NewVar(const std::string& name);
+  const Variable* FindVar(const std::string& name) const;
 
  private:
   std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
@@ -58,12 +58,12 @@ class Scope {
  public:
   Scope(const std::shared_ptr<Scope>& scope): parent_(scope) {}
 
-  Variable* GetVariable(const std::string& name) const {
+  Variable* FindVar(const std::string& name) const {
     auto it = vars_.find(name);
     if (it != vars_.end()) {
       return it->second.get();
     } else if (parent_ != nullptr) {
-      return parent_->GetVariable(name);
+      return parent_->FindVar(name);
     } else {
       return nullptr;
     }
@@ -95,10 +95,10 @@ class Scope {
   static std::shared_ptr<Scope> Create(const std::shared_ptr<Scope>& parent = nullptr);
 
   // return nullptr if not found.
-  Variable* GetVariable(const std::string& name) const;
+  Variable* FindVar(const std::string& name) const;
 
   // return if already contains same name variable.
-  Variable* CreateVariable(const std::string& name);
+  Variable* NewVar(const std::string& name);
 
  private:
   std::shared_ptr<Scope> parent_;
 ```
 
 ## Only scope can create a variable
 
-To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `CreateVariable` can construct `Variable`.
+To ensure that `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function and make `Scope` a friend class of `Variable`, so that only `NewVar` can construct a `Variable`.
 
 ## When scope destroyed, all variables inside this scope should be destroyed together
 
-The scope hold unique pointers for all variables. User can `GetVariable` from scope, but he should not hold this pointer as a member variable. Because when scope is destroyed, all variables inside this scope will be destroyed together.
+The scope holds unique pointers to all of its variables. Users can `FindVar` a variable from a scope, but should not keep the returned pointer as a member variable, because all variables inside a scope are destroyed together with the scope.
 
 ## Sharing a parent scope
 
 Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
 
 ## Orthogonal interface
 
-`GetVariable` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `CreateVariable` will return a `Error` when there is a name conflict locally. Combine `GetVariable` and `CreateVariable`, we can implement `CreateOrGetVariable` easily.
+`FindVar` will return `nullptr` when `name` is not found, so it can also serve as a `Contains` method. `NewVar` will return an `Error` when there is a local name conflict. Combining `FindVar` and `NewVar`, we can implement a find-or-create helper easily.
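Note (not part of the patch): the design document above leans on the two renamed entry points. Below is a minimal standalone sketch of how `FindVar` and `NewVar` compose into the find-or-create helper the last paragraph mentions; the helper name `FindOrCreateVar`, the simplified conflict handling, and the trivial `Variable` type are assumptions for illustration, not the real Paddle implementation (where `Variable`'s constructor is private and `Scope` is its friend).

```cpp
// Illustrative sketch only, not part of this patch. Assumes the Scope
// interface sketched in doc/design/scope.md; FindOrCreateVar is hypothetical
// and conflict handling is simplified (no Error type here).
#include <memory>
#include <string>
#include <unordered_map>

class Variable {};  // stand-in; the real Variable has a private constructor

class Scope {
 public:
  explicit Scope(const std::shared_ptr<Scope>& parent = nullptr)
      : parent_(parent) {}

  // Returns nullptr when `name` is not found, so it doubles as Contains().
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ != nullptr ? parent_->FindVar(name) : nullptr;
  }

  // Creates a variable locally; here a local name conflict simply returns
  // the existing variable instead of an Error.
  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (slot == nullptr) slot.reset(new Variable());
    return slot.get();
  }

  // Hypothetical helper combining the two orthogonal calls.
  Variable* FindOrCreateVar(const std::string& name) {
    Variable* var = FindVar(name);
    return var != nullptr ? var : NewVar(name);
  }

 private:
  std::shared_ptr<Scope> parent_;
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
};
```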
diff --git a/paddle/operators/recurrent_network_op.cc b/paddle/operators/recurrent_network_op.cc
index 1a101d6ddf1..71eef6a316a 100644
--- a/paddle/operators/recurrent_network_op.cc
+++ b/paddle/operators/recurrent_network_op.cc
@@ -33,15 +33,14 @@ void SegmentInputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
     Tensor* input =
-        step_scopes[0]->GetVariable(inlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(inlinks[i].external)->GetMutable<Tensor>();
     DDim dims = input->dims();
     PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                    "all the inlinks must have same length");
     DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
-      Tensor* step_input = step_scopes[j]
-                               ->CreateVariable(inlinks[i].internal)
-                               ->GetMutable<Tensor>();
+      Tensor* step_input =
+          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
       *step_input = input->Slice(j, j + 1);
       step_input->Resize(step_dims);
     }
@@ -53,12 +52,12 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                    const size_t seq_len) {
   for (size_t i = 0; i < outlinks.size(); i++) {
     Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
 
     // TODO(qingiqng) remove following code after adding
     // InferShape in RecurrentGradientOp
     DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                          ->GetMutable<Tensor>()
                          ->dims();
     std::vector<int> dims_vec = vectorize(step_dims);
@@ -66,9 +65,8 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
     output->mutable_data<float>(make_ddim(dims_vec), platform::CPUPlace());
 
     for (size_t j = 0; j < seq_len; j++) {
-      Tensor* step_output = step_scopes[j]
-                                ->GetVariable(outlinks[i].internal)
-                                ->GetMutable<Tensor>();
+      Tensor* step_output =
+          step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable<Tensor>();
       // TODO(luotao02) data type and platform::DeviceContext() should set
       // correctly
       (output->Slice(j, j + 1))
@@ -97,14 +95,14 @@ void LinkMemories(std::vector<std::shared_ptr<Scope>>& scopes,
   std::shared_ptr<Scope> scope = scopes[step_id];
   std::shared_ptr<Scope> linked_scope = scopes[step_id + offset];
   for (auto& attr : memories) {
-    auto mem = scope->CreateVariable(attr.pre_var)->GetMutable<Tensor>();
+    auto mem = scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
     // maybe share variable is better?
-    auto linked_mem = linked_scope->GetVariable(attr.var)->GetMutable<Tensor>();
+    auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<Tensor>();
     mem->ShareDataWith(*linked_mem);
 
     // TODO(qingqing) remove following code
     // the memory of current step should be allocated in step net
-    auto m = scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    auto m = scope->NewVar(attr.var)->GetMutable<Tensor>();
     // for unit test, as addOp and mulOp are null currently, if not
     // mutable_data, mem.data() in output will be error. We will
     // remove this line after merge the correct addOp and mulOp.
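Note (not part of the patch): the hunks in this file are mechanical renames, but the convention they preserve is worth spelling out: variables that must already exist (the external in/out links looked up in `step_scopes[0]`) go through `FindVar`, per-step variables (sliced inputs, linked memories) are created with `NewVar` in each step scope, and `LinkMemories` makes step `t`'s pre-memory alias the memory of step `t + offset`. The sketch below imitates only that aliasing; `Scope`, `Tensor`, and the variable names are simplified stand-ins, not the real Paddle types.

```cpp
// Illustrative sketch only, not part of this patch. Mimics the aliasing done
// by rnn::LinkMemories above with offset = -1: step t's "pre_h" shares the
// same storage as step t-1's "h".
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Tensor {
  std::shared_ptr<std::vector<float>> buf;  // shared storage
  void ShareDataWith(const Tensor& other) { buf = other.buf; }
};

using Scope = std::map<std::string, Tensor>;  // name -> variable (stand-in)

int main() {
  const int seq_len = 3;
  std::vector<Scope> step_scopes(seq_len);

  // Each step owns its memory "h".
  for (auto& s : step_scopes) {
    s["h"].buf = std::make_shared<std::vector<float>>(4, 0.f);
  }

  // Link: "pre_h" of step t aliases "h" of step t - 1 (offset = -1).
  for (int t = 1; t < seq_len; ++t) {
    step_scopes[t]["pre_h"].ShareDataWith(step_scopes[t - 1]["h"]);
  }

  // Writing through step 0's "h" is visible via step 1's "pre_h".
  (*step_scopes[0]["h"].buf)[0] = 42.f;
  assert((*step_scopes[1]["pre_h"].buf)[0] == 42.f);
  return 0;
}
```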
@@ -172,7 +170,7 @@ void InitArgument(const ArgumentName& name,
 }  // namespace rnn
 
 void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
-  seq_len_ = scope->GetVariable((arg_->inlinks[0]).external)
+  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                  ->GetMutable<Tensor>()
                  ->dims()[0];
   CreateScopes(scope);
@@ -187,10 +185,10 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
 
   InitMemories(step_scopes[0]);
 
-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net),
                  "stepnet [%s] is not in scope.",
                  arg_->step_net);
-  Variable* net = scope->GetVariable(arg_->step_net);
+  Variable* net = scope->FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
   // If the InferShape is called in OperatorBase's run function,
   // the rnn op only needs to do InferShape for the first time step
@@ -204,14 +202,14 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
   auto outlinks = arg_->outlinks;
   for (size_t i = 0; i < outlinks.size(); i++) {
     DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                          ->GetMutable<Tensor>()
                          ->dims();
     std::vector<int> dims_vec = vectorize(step_dims);
     // now only support fixed length
     dims_vec.insert(dims_vec.begin(), seq_len_);
     Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
     output->Resize(make_ddim(dims_vec));
   }
 }
@@ -220,7 +218,7 @@ void RecurrentAlgorithm::Run(const std::shared_ptr<Scope>& scope,
                              const platform::DeviceContext& dev_ctx) const {
   auto step_scopes = GetStepScopes(scope);
 
-  Variable* net = scope->GetVariable(arg_->step_net);
+  Variable* net = scope->FindVar(arg_->step_net);
   for (size_t step_id = 0; step_id < seq_len_; step_id++) {
     // the link memory is done in InferShape
     // maybe remove following code after testing
@@ -236,7 +234,7 @@ void RecurrentAlgorithm::Run(const std::shared_ptr<Scope>& scope,
 
 void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
   // TODO(xxx) Only two scopes are needed for inference, this case will be
   // supported later.
-  auto step_scopes = scope->GetVariable(arg_->step_scopes)
+  auto step_scopes = scope->FindVar(arg_->step_scopes)
                          ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
 
   if (seq_len_ > step_scopes->size()) {
@@ -244,12 +242,12 @@ void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
       std::shared_ptr<Scope> step_scope = std::make_shared<Scope>(scope);
 
       // Now all variables in scope must be created outside of op.
-      auto net_op = scope->GetVariable(arg_->step_net)->GetMutable();
+      auto net_op = scope->FindVar(arg_->step_net)->GetMutable();
       for (auto& input : net_op->inputs_) {
-        step_scope->CreateVariable(input);
+        step_scope->NewVar(input);
       }
       for (auto& output : net_op->outputs_) {
-        step_scope->CreateVariable(output);
+        step_scope->NewVar(output);
       }
 
       step_scopes->push_back(std::make_shared<Scope>(step_scope));
@@ -259,21 +257,18 @@ void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
 
 void RecurrentAlgorithm::InitMemories(std::shared_ptr<Scope> step_scope) const {
   for (auto& attr : arg_->memories) {
-    Tensor* pre_mem =
-        step_scope->CreateVariable(attr.pre_var)->GetMutable<Tensor>();
-    PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var),
+    Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
+    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                    "memory [%s]'s boot variable [%s] not exists",
                    attr.var,
                    attr.boot_var);
-    Tensor* boot_mem =
-        step_scope->GetVariable(attr.boot_var)->GetMutable<Tensor>();
+    Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
     pre_mem->ShareDataWith(*boot_mem);
 
     // TODO(qingqing) remove following code
     // the memory of current step should be allocated in step net
     // here for unit test
-    auto cur_step_mem =
-        step_scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    auto cur_step_mem = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
     cur_step_mem->mutable_data<float>(boot_mem->dims(), platform::CPUPlace());
   }
 }
@@ -337,9 +332,8 @@ void RecurrentGradientAlgorithm::Run(
     const platform::DeviceContext& dev_ctx) const {
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
-                 "step net is not in scope.");
-  Variable* net = scope->GetVariable(arg_->step_net);
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
+  Variable* net = scope->FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
   for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
     if (static_cast<size_t>(step_id) != seq_len_ - 1) {
@@ -354,31 +348,29 @@ void RecurrentGradientAlgorithm::Run(
 
 void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
     std::shared_ptr<Scope> step_scope) const {
   for (auto& attr : arg_->memories) {
-    Tensor* mem_grad =
-        step_scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
     PADDLE_ENFORCE(mem_grad != nullptr,
                    "boot_tensor should be retrieved before");
-    PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var),
+    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                    "memory [%s]'s boot variable [%s] not exists",
                    attr.var,
                    attr.boot_var);
     Tensor* boot_mem_grad =
-        step_scope->CreateVariable(attr.boot_var)->GetMutable<Tensor>();
+        step_scope->NewVar(attr.boot_var)->GetMutable<Tensor>();
     boot_mem_grad->ShareDataWith(*mem_grad);
   }
 }
 
 void RecurrentGradientAlgorithm::InferShape(
     const std::shared_ptr<Scope>& scope) const {
-  seq_len_ = scope->GetVariable((arg_->inlinks[0]).external)
+  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                  ->GetMutable<Tensor>()
                  ->dims()[0];
   auto step_scopes = GetStepScopes(scope);
   rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
-                 "step net is not in scope.");
-  Variable* net = scope->GetVariable(arg_->step_net);
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
+  Variable* net = scope->FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
   for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
@@ -391,14 +383,14 @@ void RecurrentGradientAlgorithm::InferShape(
   auto outlinks = arg_->outlinks;
   for (size_t i = 0; i < outlinks.size(); i++) {
     DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                          ->GetMutable<Tensor>()
                          ->dims();
     std::vector<int> dims_vec = vectorize(step_dims);
     // now only support fixed length
     dims_vec.insert(dims_vec.begin(), seq_len_);
     Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
     output->Resize(make_ddim(dims_vec));
   }
   LinkBootMemoryGradients(step_scopes[0]);
diff --git a/paddle/operators/recurrent_network_op.h b/paddle/operators/recurrent_network_op.h
index 8946c8ce381..eabcc52f6b8 100644
--- a/paddle/operators/recurrent_network_op.h
+++ b/paddle/operators/recurrent_network_op.h
@@ -121,7 +121,7 @@ protected:
   inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
       std::shared_ptr<Scope> scope) const {
-    return *(scope->GetVariable(arg_->step_scopes))
+    return *(scope->FindVar(arg_->step_scopes))
                 ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
   }
 
@@ -159,7 +159,7 @@ public:
 protected:
   inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
       std::shared_ptr<Scope> scope) const {
-    return *(scope->GetVariable(arg_->step_scopes))
+    return *(scope->FindVar(arg_->step_scopes))
                 ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
   }
 
diff --git a/paddle/operators/recurrent_network_op_test.cc b/paddle/operators/recurrent_network_op_test.cc
index 6784ac6001a..b22cb40f286 100644
--- a/paddle/operators/recurrent_network_op_test.cc
+++ b/paddle/operators/recurrent_network_op_test.cc
@@ -38,37 +38,37 @@ protected:
     // create input, and init content
     LOG(INFO) << "create global variable x";
     for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
-      Variable* x = scope_->CreateVariable(inlink);
+      Variable* x = scope_->NewVar(inlink);
       DDim dims = make_ddim(std::vector<int>{
          10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
       x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
     }
     // create output alias just for test
     for (auto inlink : std::vector<std::string>{"h@alias"}) {
-      Variable* x = scope_->CreateVariable(inlink);
+      Variable* x = scope_->NewVar(inlink);
       DDim dims =
           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
       x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
     }
 
     LOG(INFO) << "create global variable w";
-    Variable* w = scope_->CreateVariable("rnn/w");
+    Variable* w = scope_->NewVar("rnn/w");
     w->GetMutable<Tensor>()->mutable_data<float>(
         make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
 
     for (auto boot : std::vector<std::string>{"x_boot", "h_boot"}) {
       LOG(INFO) << "create global variable " << boot;
-      Variable* h_boot = scope_->CreateVariable(boot);
+      Variable* h_boot = scope_->NewVar(boot);
       h_boot->GetMutable<Tensor>()->mutable_data<float>(
           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
           platform::CPUPlace());
     }
 
     LOG(INFO) << "create variable step_scopes";
-    scope_->CreateVariable("step_scopes");
+    scope_->NewVar("step_scopes");
 
     LOG(INFO) << "create variable h";
-    scope_->CreateVariable("h");
+    scope_->NewVar("h");
   }
 
   void CreateRNNOp() {
@@ -150,7 +150,7 @@ protected:
 
   void CreateStepNet() {
     LOG(INFO) << "create variable step_net";
-    Variable* var = scope_->CreateVariable("step_net");
+    Variable* var = scope_->NewVar("step_net");
     auto net = var->GetMutable();
 
     // rnn/s is net's input or output?
net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"}; @@ -194,64 +194,62 @@ protected: scope_ = std::make_shared(); // inputs: x LOG(INFO) << "create global variable x"; - Variable* x = scope_->CreateVariable("x"); + Variable* x = scope_->NewVar("x"); DDim dims = make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); // inputs: h_boot LOG(INFO) << "create global variable h_boot"; - Variable* h_boot = scope_->CreateVariable("h_boot"); + Variable* h_boot = scope_->NewVar("h_boot"); h_boot->GetMutable()->mutable_data( make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); // inputs: w LOG(INFO) << "create global variable w"; - Variable* w = scope_->CreateVariable("rnn/w"); + Variable* w = scope_->NewVar("rnn/w"); w->GetMutable()->mutable_data(make_ddim({30, 30}), platform::CPUPlace()); // inputs: h_grad LOG(INFO) << "create variable h_grad"; - Variable* dh = scope_->CreateVariable("h_grad"); + Variable* dh = scope_->NewVar("h_grad"); dh->GetMutable()->mutable_data(make_ddim({10, 20, 30}), platform::CPUPlace()); // inputs: step_scopes LOG(INFO) << "create variable step_scopes"; - scope_->CreateVariable("step_scopes"); + scope_->NewVar("step_scopes"); // inputs: step_net LOG(INFO) << "create variable step_net"; - scope_->CreateVariable("step_net"); + scope_->NewVar("step_net"); // outputs: w_grad LOG(INFO) << "create global variable w_grad"; - scope_->CreateVariable("rnn/w_grad"); + scope_->NewVar("rnn/w_grad"); // outputs: x_grad LOG(INFO) << "create global variable x_grad"; - scope_->CreateVariable("x_grad"); + scope_->NewVar("x_grad"); // outputs: h_boot_grad LOG(INFO) << "create global variable h_boot_grad"; - scope_->CreateVariable("h_boot_grad"); + scope_->NewVar("h_boot_grad"); } void CreateStepScopes() { std::vector>* step_scopes = - scope_->GetVariable("step_scopes") + scope_->FindVar("step_scopes") ->GetMutable>>(); for (int i = 0; i < 10; ++i) { auto scope = std::make_shared(scope_); - auto pre_t = scope->CreateVariable("rnn/pre_h")->GetMutable(); + auto pre_t = scope->NewVar("rnn/pre_h")->GetMutable(); pre_t->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); - auto tensor = scope->CreateVariable("rnn/h")->GetMutable(); + auto tensor = scope->NewVar("rnn/h")->GetMutable(); tensor->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); // for unit test of ConcatOutputs - auto xg = scope->CreateVariable("rnn/x_grad")->GetMutable(); + auto xg = scope->NewVar("rnn/x_grad")->GetMutable(); xg->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); step_scopes->push_back(scope); } // last time step - auto g = (*step_scopes)[9] - ->CreateVariable("rnn/h_pre_grad") - ->GetMutable(); + auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable(); g->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); } @@ -280,7 +278,7 @@ protected: void CreateStepNet() { LOG(INFO) << "create variable step_net"; - Variable* var = scope_->CreateVariable("step_net"); + Variable* var = scope_->NewVar("step_net"); auto net = var->GetMutable(); net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", "rnn/s_grad"}, @@ -301,7 +299,7 @@ protected: inlink.external = "x"; inlink.internal = "rnn/x"; std::vector>* step_scopes = - scope_->GetVariable("step_scopes") + scope_->FindVar("step_scopes") ->GetMutable>>(); rnn::SegmentInputs(*step_scopes, std::vector{inlink}, 10); } @@ -315,7 +313,7 @@ protected: std::vector memories; memories.push_back(mem_attr); std::vector>* step_scopes = - 
+        scope_->FindVar("step_scopes")
            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
     for (int i = 1; i < 10; ++i) {
       rnn::LinkMemories(*step_scopes, memories, i, -1);
     }
@@ -344,8 +342,8 @@ TEST(RecurrentOp, LinkMemories) {
   std::vector<std::shared_ptr<Scope>> step_scopes;
   for (int i = 0; i < len; ++i) {
     auto scope = std::make_shared<Scope>();
-    scope->CreateVariable("pre_h");
-    auto tensor = scope->CreateVariable("h")->GetMutable<Tensor>();
+    scope->NewVar("pre_h");
+    auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
     float* data = tensor->mutable_data<float>(make_ddim({15, 20}), CPUPlace());
     for (int i = 0; i < 15 * 20; ++i) {
       data[i] = rand() * (1. / (double)RAND_MAX);
@@ -367,9 +365,9 @@ TEST(RecurrentOp, LinkMemories) {
   // check
   for (int i = 0; i < len - 1; ++i) {
     const float* a =
-        step_scopes[i]->GetVariable("h")->GetMutable<Tensor>()->data<float>();
+        step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
     const float* b = step_scopes[i + 1]
-                         ->GetVariable("pre_h")
+                         ->FindVar("pre_h")
                          ->GetMutable<Tensor>()
                          ->data<float>();
     for (size_t i = 0; i < 15 * 20; ++i) {
@@ -382,14 +380,10 @@
   }
   // check
   for (int i = len - 2; i >= 0; --i) {
-    const float* a = step_scopes[i]
-                         ->GetVariable("pre_h")
-                         ->GetMutable<Tensor>()
-                         ->data<float>();
-    const float* b = step_scopes[i + 1]
-                         ->GetVariable("h")
-                         ->GetMutable<Tensor>()
-                         ->data<float>();
+    const float* a =
+        step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
+    const float* b =
+        step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
     for (size_t i = 0; i < 15 * 20; ++i) {
       ASSERT_FLOAT_EQ(a[i], b[i]);
     }
diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h
index fd4adbd9dec..ab474508d98 100644
--- a/paddle/platform/enforce.h
+++ b/paddle/platform/enforce.h
@@ -127,6 +127,11 @@ inline typename std::enable_if::type throw_on_error(
 
 #endif  // PADDLE_ONLY_CPU
 
+template <typename T>
+inline void throw_on_error(T* e) {
+  throw_on_error(e != nullptr, "");
+}
+
 template <typename T>
 inline void throw_on_error(T e) {
   throw_on_error(e, "");
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 08a8bd0d8b6..eacec91cb29 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -104,13 +104,9 @@ All parameter, weight, gradient are variables in Paddle.
 
   py::class_<pd::Scope, std::shared_ptr<pd::Scope>>(m, "Scope")
       .def(py::init<const std::shared_ptr<pd::Scope>&>())
-      .def("get_var",
-           &pd::Scope::GetVariable,
-           py::return_value_policy::reference)
-      .def("create_var",
-           &pd::Scope::CreateVariable,
-           py::return_value_policy::reference)
-      .def("get_var_name", &pd::Scope::GetVariableName);
+      .def("get_var", &pd::Scope::FindVar, py::return_value_policy::reference)
+      .def("create_var", &pd::Scope::NewVar, py::return_value_policy::reference)
+      .def("get_var_name", &pd::Scope::FindVarName);
 
   //! @note: Be careful! PyBind will return std::string as an unicode, not
   //! Python str. If you want a str object, you should cast them in Python.
-- 
GitLab
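Note (not part of the patch): the `throw_on_error(T*)` overload added to paddle/platform/enforce.h is what lets the call sites above pass the raw pointer returned by `Scope::FindVar` straight into `PADDLE_ENFORCE`, treating a null pointer as a failed check. Below is a hedged, self-contained sketch of that idea; it uses `std::runtime_error` and plain function calls instead of Paddle's real macro and error types, which are simplifications.

```cpp
// Illustrative sketch only, not part of this patch. Mirrors the intent of the
// throw_on_error(T*) specialization: a null pointer behaves like a failed
// boolean check, so "enforce(scope->FindVar(name))"-style calls just work.
#include <iostream>
#include <stdexcept>
#include <string>

// Plain bool version: throws when the condition is false.
inline void throw_on_error(bool stat, const std::string& msg) {
  if (!stat) throw std::runtime_error(msg);
}

// Generic value overload, as in the original header: anything usable in a
// boolean context can be enforced directly.
template <typename T>
inline void throw_on_error(T e) {
  throw_on_error(static_cast<bool>(e), "");
}

// The overload this patch adds: a raw pointer is "ok" iff it is non-null.
template <typename T>
inline void throw_on_error(T* e) {
  throw_on_error(e != nullptr, "");
}

struct Variable {};

int main() {
  Variable v;
  Variable* found = &v;
  Variable* missing = nullptr;

  throw_on_error(found);  // non-null pointer: no exception
  try {
    throw_on_error(missing);  // null pointer: behaves like a failed check
  } catch (const std::runtime_error&) {
    std::cout << "null pointer rejected as expected\n";
  }
  return 0;
}
```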