From 5d134a03f1788bab1ee01904aa54382a46e25551 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Aug 2017 15:04:43 +0800 Subject: [PATCH] Refine remove std::shared_ptr in Scope * Make interface of Operator to `const Scope&` --- paddle/framework/net.h | 4 +- paddle/framework/net_op_test.cc | 7 +- paddle/framework/op_registry_test.cc | 14 +-- paddle/framework/operator.h | 28 +++--- paddle/framework/operator_test.cc | 24 ++--- paddle/framework/scope.cc | 10 +- paddle/framework/scope.h | 37 +++++--- paddle/framework/variable.h | 4 +- paddle/operators/recurrent_network_op.cc | 72 +++++++------- paddle/operators/recurrent_network_op.h | 42 ++++----- paddle/operators/recurrent_network_op_test.cc | 93 ++++++++++--------- paddle/platform/enforce.h | 12 +-- paddle/pybind/pybind.cc | 17 +++- .../v2/framework/default_scope_funcs.py | 19 ++-- python/paddle/v2/framework/network.py | 19 ++-- .../paddle/v2/framework/tests/op_test_util.py | 8 +- .../tests/test_default_scope_funcs.py | 10 +- .../paddle/v2/framework/tests/test_fc_op.py | 12 +-- .../paddle/v2/framework/tests/test_scope.py | 22 ++--- .../paddle/v2/framework/tests/test_tensor.py | 8 +- 20 files changed, 239 insertions(+), 223 deletions(-) diff --git a/paddle/framework/net.h b/paddle/framework/net.h index 089c1355951..fc98080b178 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -43,7 +43,7 @@ class NetOp : public OperatorBase { * Infer all the operators' input and output variables' shapes, will be called * before every mini-batch */ - void InferShape(const std::shared_ptr& scope) const override { + void InferShape(const Scope& scope) const override { for (auto& op : ops_) { op->InferShape(scope); } @@ -56,7 +56,7 @@ class NetOp : public OperatorBase { * scope will be used instead. If no OpContext is provicded, default context * will be used. 
*/ - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { for (auto& op : ops_) { op->Run(scope, dev_ctx); diff --git a/paddle/framework/net_op_test.cc b/paddle/framework/net_op_test.cc index 8048311fe54..3392b033c50 100644 --- a/paddle/framework/net_op_test.cc +++ b/paddle/framework/net_op_test.cc @@ -16,11 +16,10 @@ static int run_cnt = 0; class TestOp : public OperatorBase { public: - void InferShape( - const std::shared_ptr& scope) const override { + void InferShape(const framework::Scope& scope) const override { ++infer_shape_cnt; } - void Run(const std::shared_ptr& scope, + void Run(const framework::Scope& scope, const paddle::platform::DeviceContext& dev_ctx) const override { ++run_cnt; } @@ -62,7 +61,7 @@ TEST(OpKernel, all) { ASSERT_EQ(1UL, tmp_idx.size()); ASSERT_EQ("y", net->outputs_[tmp_idx[0]]); - auto scope = std::make_shared(); + Scope scope; platform::CPUDeviceContext dev_ctx; net->InferShape(scope); diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 2ef781bf867..d8ae3d07227 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -7,9 +7,9 @@ namespace paddle { namespace framework { class CosineOp : public OperatorBase { public: - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override {} - void InferShape(const std::shared_ptr& scope) const override {} + void InferShape(const Scope& scope) const override {} }; class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { @@ -27,8 +27,8 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class MyTestOp : public OperatorBase { public: - void InferShape(const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override {} }; @@ -69,7 +69,7 @@ TEST(OpRegistry, CreateOp) { std::shared_ptr op = paddle::framework::OpRegistry::CreateOp(op_desc); - auto scope = std::make_shared(); + paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); float scale_get = op->GetAttr("scale"); @@ -111,7 +111,7 @@ TEST(OpRegistry, DefaultValue) { std::shared_ptr op = paddle::framework::OpRegistry::CreateOp(op_desc); - auto scope = std::make_shared(); + paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); ASSERT_EQ(op->GetAttr("scale"), 1.0); @@ -173,7 +173,7 @@ TEST(OpRegistry, CustomChecker) { SetInputFormat(&op_desc); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::platform::CPUDeviceContext dev_ctx; - auto scope = std::make_shared(); + paddle::framework::Scope scope; op->Run(scope, dev_ctx); int test_attr = op->GetAttr("test_attr"); ASSERT_EQ(test_attr, 4); diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index a78d91f1b9d..4912a5f290b 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -84,10 +84,10 @@ class OperatorBase { /// InferShape infer the size of Variables used by this Operator with /// information inside scope - virtual void InferShape(const std::shared_ptr& scope) const = 0; + virtual void InferShape(const Scope& scope) const = 0; /// Net will call this function to Run an op. 
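As a quick illustration of the new calling convention (this sketch is not part of the patch; the operator name and body are invented), a concrete operator now receives the scope by const reference and queries it directly:

#include "paddle/framework/operator.h"
#include "paddle/platform/enforce.h"

namespace paddle {
namespace framework {

// Hypothetical operator written against the refactored interface.
class DummyOp : public OperatorBase {
 public:
  void InferShape(const Scope& scope) const override {
    // Variables are looked up directly on the scope; no shared_ptr dereference.
    PADDLE_ENFORCE(scope.FindVar(inputs_[0]) != nullptr,
                   "input [%s] is not in scope", inputs_[0]);
  }
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    Variable* out = scope.FindVar(outputs_[0]);  // plain pointer, owned by the scope
    (void)out;
    (void)dev_ctx;  // ... compute into `out` using dev_ctx ...
  }
};

}  // namespace framework
}  // namespace paddle

Callers correspondingly hold the scope as a plain value, e.g. `Scope scope; op->Run(scope, dev_ctx);`, as the updated unit tests in this patch do.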
- virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const = 0; virtual bool IsNetOp() const { return false; } @@ -114,24 +114,24 @@ class OperatorBase { class KernelContext { public: - KernelContext(const OperatorBase* op, const std::shared_ptr& scope, + KernelContext(const OperatorBase* op, const Scope& scope, const platform::DeviceContext& device_context) : op_(*op), scope_(scope), device_context_(device_context) {} const Variable* Input(int index) const { - return scope_->FindVar(op_.inputs_[index]); + return scope_.FindVar(op_.inputs_[index]); } Variable* Output(int index) const { - return scope_->FindVar(op_.outputs_[index]); + return scope_.FindVar(op_.outputs_[index]); } const Variable* Input(const std::string& name) const { - return scope_->FindVar(op_.Input(name)); + return scope_.FindVar(op_.Input(name)); } const Variable* Output(const std::string& name) const { - return scope_->FindVar(op_.Output(name)); + return scope_.FindVar(op_.Output(name)); } const std::vector Inputs(const std::string& name) const { @@ -139,7 +139,7 @@ class KernelContext { std::vector res; std::transform( names.begin(), names.end(), res.begin(), - [this](const std::string& name) { return scope_->FindVar(name); }); + [this](const std::string& name) { return scope_.FindVar(name); }); return res; } @@ -148,7 +148,7 @@ class KernelContext { std::vector res; std::transform( names.begin(), names.end(), res.begin(), - [this](const std::string& name) { return scope_->FindVar(name); }); + [this](const std::string& name) { return scope_.FindVar(name); }); return res; } @@ -160,7 +160,7 @@ class KernelContext { platform::Place GetPlace() const { return device_context_.GetPlace(); } const OperatorBase& op_; - const std::shared_ptr& scope_; + const Scope& scope_; const platform::DeviceContext& device_context_; }; @@ -216,7 +216,7 @@ class OperatorWithKernel : public OperatorBase { using OpKernelMap = std::unordered_map, OpKernelHash>; - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const final { auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx)); opKernel->Compute(KernelContext(this, scope, dev_ctx)); @@ -228,7 +228,7 @@ class OperatorWithKernel : public OperatorBase { return g_all_op_kernels; } - void InferShape(const std::shared_ptr& scope) const final { + void InferShape(const Scope& scope) const final { std::vector ins; VarNamesToTensors(scope, inputs_, &ins); std::vector outs; @@ -238,13 +238,13 @@ class OperatorWithKernel : public OperatorBase { private: template - void VarNamesToTensors(const std::shared_ptr& scope, + void VarNamesToTensors(const Scope& scope, const std::vector& var_names, std::vector* container) const { container->reserve(var_names.size()); VarToTensor convert; for (auto& name : var_names) { - auto var = scope->FindVar(name); + auto var = scope.FindVar(name); if (var != nullptr) { container->push_back(convert(var)); } else { diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 2eeb2946fc4..1fcaf2d48d9 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -24,15 +24,15 @@ static int op_run_num = 0; class OpWithoutKernelTest : public OperatorBase { public: void Init() override { x = 1; } - void InferShape(const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& 
scope, const platform::DeviceContext& dev_ctx) const override { op_run_num++; ASSERT_EQ((int)inputs_.size(), 1); ASSERT_EQ((int)outputs_.size(), 1); - ASSERT_EQ(scope->FindVar(inputs_[0]), nullptr); + ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(x, 1); - ASSERT_NE(scope->FindVar(outputs_[0]), nullptr); + ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); } public: @@ -68,10 +68,10 @@ TEST(OperatorBase, all) { attr->set_f(3.14); paddle::platform::CPUDeviceContext device_context; - auto scope = std::make_shared(); + paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - scope->NewVar("OUT1"); + scope.NewVar("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); op->Run(scope, device_context); ASSERT_EQ(paddle::framework::op_run_num, 1); @@ -117,12 +117,12 @@ class CPUKernelTest : public OpKernel { class OperatorMultiInputsTest : public OperatorBase { public: void Init() override { x = 1; } - void InferShape(const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { - ASSERT_EQ(scope->FindVar(inputs_[0]), nullptr); + ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(x, 1); - ASSERT_NE(scope->FindVar(outputs_[0]), nullptr); + ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); ASSERT_EQ(Input("x"), "IN1"); ASSERT_EQ(Input("y"), "OUT1"); } @@ -186,7 +186,7 @@ TEST(OpKernel, all) { attr->set_f(3.14); paddle::platform::CPUDeviceContext cpu_device_context; - auto scope = std::make_shared(); + paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0); @@ -232,7 +232,7 @@ TEST(OpKernel, multi_inputs) { output_format->Add(2); // y1 paddle::platform::CPUDeviceContext cpu_device_context; - auto scope = std::make_shared(); + paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_device_context); diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 3c9ec92d720..080b4ac621c 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -19,11 +19,11 @@ namespace paddle { namespace framework { Scope::~Scope() { + DropKids(); for (auto& kv : vars_) delete kv.second; - for (Scope* s : kids_) delete s; } -Scope& Scope::NewScope() { +Scope& Scope::NewScope() const { kids_.push_back(new Scope(this)); return *kids_.back(); } @@ -49,7 +49,7 @@ Variable* Scope::FindVar(const std::string& name) const { return (parent_ == nullptr) ? nullptr : parent_->FindVar(name); } -Scope* Scope::FindScope(const Variable* var) { +const Scope* Scope::FindScope(const Variable* var) const { for (auto& kv : vars_) { if (kv.second == var) { return this; @@ -57,6 +57,10 @@ Scope* Scope::FindScope(const Variable* var) { } return (parent_ == nullptr) ? nullptr : parent_->FindScope(var); } +void Scope::DropKids() { + for (Scope* s : kids_) delete s; + kids_.clear(); +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 9b4fffb9a62..2ba3f8ed355 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include #include +#include #include "paddle/framework/variable.h" @@ -38,30 +38,39 @@ class Scope { Scope() {} ~Scope(); - // Create a sub-scope. 
Returns a reference other than a pointer so - // to prevent from manual deletion. - Scope& NewScope(); + // Disable Copy, Assign, Move. + Scope(const Scope& other) = delete; + Scope& operator=(const Scope& other) = delete; + Scope(Scope&& other) = delete; - // Create a variable with given name if it doesn't exist. + /// Create a sub-scope. Returns a reference other than a pointer so + /// to prevent from manual deletion. + /// Mark it to const because that new kid scope cannot change parent scope. + Scope& NewScope() const; + + /// Create a variable with given name if it doesn't exist. Variable* NewVar(const std::string& name); - // Create a variable with a scope-unique name. + /// Create a variable with a scope-unique name. Variable* NewVar(); - // Find a variable in the scope or any of its ancestors. Returns - // nullptr if cannot find. + /// Find a variable in the scope or any of its ancestors. Returns + /// nullptr if cannot find. Variable* FindVar(const std::string& name) const; - // Find the scope or an ancestor scope that contains the given variable. - Scope* FindScope(const Variable* var); + /// Find the scope or an ancestor scope that contains the given variable. + const Scope* FindScope(const Variable* var) const; + + /// Drop all kids scopes belonged to this scope. + void DropKids(); private: // Call Scope::NewScope for a sub-scope. - explicit Scope(Scope* parent) : parent_(parent) {} + explicit Scope(Scope const* parent) : parent_(parent) {} - std::map vars_; - std::list kids_; - Scope* parent_{nullptr}; + std::unordered_map vars_; + mutable std::list kids_; + Scope const* parent_{nullptr}; }; } // namespace framework diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index 10a3866b850..38fc2720a30 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -16,7 +16,7 @@ #include #include -#include "paddle/platform/assert.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace framework { @@ -25,7 +25,7 @@ class Variable { public: template const T& Get() const { - PADDLE_ASSERT(IsType()); + PADDLE_ENFORCE(IsType(), "Variable must be type %s", typeid(T).name()); return *static_cast(holder_->Ptr()); } diff --git a/paddle/operators/recurrent_network_op.cc b/paddle/operators/recurrent_network_op.cc index 71eef6a316a..224bb1432ac 100644 --- a/paddle/operators/recurrent_network_op.cc +++ b/paddle/operators/recurrent_network_op.cc @@ -27,7 +27,7 @@ namespace operators { namespace rnn { -void SegmentInputs(std::vector>& step_scopes, +void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len) { PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided."); @@ -47,7 +47,7 @@ void SegmentInputs(std::vector>& step_scopes, } } -void ConcatOutputs(std::vector>& step_scopes, +void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, const size_t seq_len) { for (size_t i = 0; i < outlinks.size(); i++) { @@ -75,7 +75,7 @@ void ConcatOutputs(std::vector>& step_scopes, } } -void LinkMemories(std::vector>& scopes, +void LinkMemories(const std::vector& scopes, const std::vector& memories, size_t step_id, int offset) { @@ -92,8 +92,8 @@ void LinkMemories(std::vector>& scopes, offset, scopes.size(), step_id); - std::shared_ptr scope = scopes[step_id]; - std::shared_ptr linked_scope = scopes[step_id + offset]; + auto scope = scopes[step_id]; + auto linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { auto mem = scope->NewVar(attr.pre_var)->GetMutable(); // maybe share 
variable is better? @@ -169,8 +169,8 @@ void InitArgument(const ArgumentName& name, } // namespace rnn -void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { - seq_len_ = scope->FindVar((arg_->inlinks[0]).external) +void RecurrentAlgorithm::InferShape(const Scope& scope) const { + seq_len_ = scope.FindVar((arg_->inlinks[0]).external) ->GetMutable() ->dims()[0]; CreateScopes(scope); @@ -185,10 +185,10 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { InitMemories(step_scopes[0]); - PADDLE_ENFORCE(scope->FindVar(arg_->step_net), + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, "stepnet [%s] is not in scope.", arg_->step_net); - Variable* net = scope->FindVar(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); // If the InferShape is called in OperatorBase's run function, // the rnn op only needs to do InferShape for the first time step @@ -196,7 +196,7 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { if (i > 0) { rnn::LinkMemories(step_scopes, arg_->memories, i, -1); } - net->GetMutable()->InferShape(step_scopes[i]); + net->GetMutable()->InferShape(*step_scopes[i]); } auto outlinks = arg_->outlinks; @@ -214,51 +214,51 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { } } -void RecurrentAlgorithm::Run(const std::shared_ptr& scope, +void RecurrentAlgorithm::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto step_scopes = GetStepScopes(scope); - Variable* net = scope->FindVar(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); for (size_t step_id = 0; step_id < seq_len_; step_id++) { // the link memory is done in InferShape // maybe remove following code after testing if (step_id > 0) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1); } - net->GetMutable()->Run(step_scopes[step_id], dev_ctx); + net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); } -void RecurrentAlgorithm::CreateScopes(std::shared_ptr scope) const { +void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { // TODO(xxx) Only two scopes are needed for inference, this case will be // supported later. - auto step_scopes = scope->FindVar(arg_->step_scopes) - ->GetMutable>>(); + auto step_scopes = + scope.FindVar(arg_->step_scopes)->GetMutable>(); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { - std::shared_ptr step_scope = std::make_shared(scope); + auto& step_scope = scope.NewScope(); // Now all variables in scope must be created outside of op. 
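The ownership rules behind these per-step child scopes come from the new Scope interface above: NewScope() hands back a reference because the parent keeps ownership of its kids, and DropKids() (or the destructor) releases them. A small usage sketch, not part of the patch:

#include "paddle/framework/scope.h"

using paddle::framework::Scope;

int main() {
  Scope global;                      // scopes now live as plain values
  global.NewVar("w");                // variable created in the outer scope

  Scope& step = global.NewScope();   // child scope, owned by `global`
  step.NewVar("h");                  // local to the child

  bool sees_parent = step.FindVar("w") != nullptr;    // true: lookup walks up the ancestors
  bool leaks_down  = global.FindVar("h") != nullptr;  // false: the parent cannot see kids

  global.DropKids();                 // explicitly free all child scopes;
                                     // ~Scope() would do the same
  return (sees_parent && !leaks_down) ? 0 : 1;
}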
- auto net_op = scope->FindVar(arg_->step_net)->GetMutable(); + auto net_op = scope.FindVar(arg_->step_net)->GetMutable(); for (auto& input : net_op->inputs_) { - step_scope->NewVar(input); + if (!step_scope.FindVar(input)) step_scope.NewVar(input); } for (auto& output : net_op->outputs_) { - step_scope->NewVar(output); + step_scope.NewVar(output); } - step_scopes->push_back(std::make_shared(step_scope)); + step_scopes->emplace_back(&step_scope); } } } -void RecurrentAlgorithm::InitMemories(std::shared_ptr step_scope) const { +void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { for (auto& attr : arg_->memories) { Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable(); - PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var), + PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); @@ -328,30 +328,30 @@ public: }; void RecurrentGradientAlgorithm::Run( - const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const { + const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); - PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope."); - Variable* net = scope->FindVar(arg_->step_net); + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, + "step net is not in scope."); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } - net->GetMutable()->Run(step_scopes[step_id], dev_ctx); + net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); } LinkBootMemoryGradients(step_scopes[0]); rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); } void RecurrentGradientAlgorithm::LinkBootMemoryGradients( - std::shared_ptr step_scope) const { + Scope* step_scope) const { for (auto& attr : arg_->memories) { Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable(); PADDLE_ENFORCE(mem_grad != nullptr, "boot_tensor should be retrieved before"); - PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var), + PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); @@ -361,23 +361,23 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( } } -void RecurrentGradientAlgorithm::InferShape( - const std::shared_ptr& scope) const { - seq_len_ = scope->FindVar((arg_->inlinks[0]).external) +void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { + seq_len_ = scope.FindVar((arg_->inlinks[0]).external) ->GetMutable() ->dims()[0]; auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); - PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope."); - Variable* net = scope->FindVar(arg_->step_net); + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, + "step net is not in scope."); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } - net->GetMutable()->InferShape(step_scopes[step_id]); + net->GetMutable()->InferShape(*step_scopes[step_id]); } auto outlinks = arg_->outlinks; diff --git 
a/paddle/operators/recurrent_network_op.h b/paddle/operators/recurrent_network_op.h index eabcc52f6b8..d57a1a2e51c 100644 --- a/paddle/operators/recurrent_network_op.h +++ b/paddle/operators/recurrent_network_op.h @@ -70,18 +70,18 @@ struct ArgumentName { /** * Prepare inputs for each step net. */ -void SegmentInputs(std::vector>& step_scopes, +void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len); /** * Process outputs of step nets and merge to variables. */ -void ConcatOutputs(std::vector>& step_scopes, +void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, const size_t seq_len); -void LinkMemories(std::vector>& step_scopes, +void LinkMemories(const std::vector& step_scopes, const std::vector& memories, size_t step_id, int offset); @@ -100,15 +100,14 @@ void InitArgument(const ArgumentName& name, Argument* arg); class RecurrentAlgorithm { public: - void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const; + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const; void Init(std::unique_ptr arg) { arg_ = std::move(arg); } /** * InferShape must be called before Run. */ - void InferShape(const std::shared_ptr& scope) const; + void InferShape(const Scope& scope) const; protected: /* @@ -117,15 +116,13 @@ protected: * NOTE the scopes are reused in both the forward and backward, so just * create once and expand its size if more steps need. */ - void CreateScopes(std::shared_ptr scope) const; + void CreateScopes(const Scope& scope) const; - inline const std::vector>& GetStepScopes( - std::shared_ptr scope) const { - return *(scope->FindVar(arg_->step_scopes)) - ->GetMutable>>(); + const std::vector& GetStepScopes(const Scope& scope) const { + return *scope.FindVar(arg_->step_scopes)->GetMutable>(); } - void InitMemories(std::shared_ptr step_scopes) const; + void InitMemories(Scope* step_scopes) const; private: std::unique_ptr arg_; @@ -146,21 +143,18 @@ class RecurrentGradientAlgorithm { public: void Init(std::unique_ptr arg) { arg_ = std::move(arg); } - void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const; + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const; - void LinkBootMemoryGradients(std::shared_ptr step_scopes) const; + void LinkBootMemoryGradients(Scope* step_scopes) const; /** * InferShape must be called before Run. */ - void InferShape(const std::shared_ptr& scope) const; + void InferShape(const Scope& scope) const; protected: - inline const std::vector>& GetStepScopes( - std::shared_ptr scope) const { - return *(scope->FindVar(arg_->step_scopes)) - ->GetMutable>>(); + inline const std::vector& GetStepScopes(const Scope& scope) const { + return *scope.FindVar(arg_->step_scopes)->GetMutable>(); } private: @@ -175,11 +169,11 @@ public: /** * InferShape must be called before Run. */ - virtual void InferShape(const std::shared_ptr& scope) const override { + virtual void InferShape(const Scope& scope) const override { alg_.InferShape(scope); } - virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); } @@ -197,11 +191,11 @@ public: /** * InferShape must be called before Run. 
*/ - virtual void InferShape(const std::shared_ptr& scope) const override { + virtual void InferShape(const Scope& scope) const override { alg_.InferShape(scope); } - virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); } diff --git a/paddle/operators/recurrent_network_op_test.cc b/paddle/operators/recurrent_network_op_test.cc index b22cb40f286..b0e61fbee61 100644 --- a/paddle/operators/recurrent_network_op_test.cc +++ b/paddle/operators/recurrent_network_op_test.cc @@ -34,41 +34,40 @@ protected: virtual void TearDown() override {} void CreateGlobalVariables() { - scope_ = std::make_shared(); // create input, and init content LOG(INFO) << "create global variable x"; for (auto inlink : std::vector{"x", "x0", "x1", "h"}) { - Variable* x = scope_->NewVar(inlink); + Variable* x = scope_.NewVar(inlink); DDim dims = make_ddim(std::vector{ 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); } // create output alias just for test for (auto inlink : std::vector{"h@alias"}) { - Variable* x = scope_->NewVar(inlink); + Variable* x = scope_.NewVar(inlink); DDim dims = make_ddim(std::vector{20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); } LOG(INFO) << "create global variable w"; - Variable* w = scope_->NewVar("rnn/w"); + Variable* w = scope_.NewVar("rnn/w"); w->GetMutable()->mutable_data( make_ddim(std::vector{30, 30}), platform::CPUPlace()); for (auto boot : std::vector{"x_boot", "h_boot"}) { LOG(INFO) << "create global variable " << boot; - Variable* h_boot = scope_->NewVar(boot); + Variable* h_boot = scope_.NewVar(boot); h_boot->GetMutable()->mutable_data( make_ddim(std::vector{20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); } LOG(INFO) << "create variable step_scopes"; - scope_->NewVar("step_scopes"); + scope_.NewVar("step_scopes"); LOG(INFO) << "create variable h"; - scope_->NewVar("h"); + scope_.NewVar("h"); } void CreateRNNOp() { @@ -150,7 +149,7 @@ protected: void CreateStepNet() { LOG(INFO) << "create variable step_net"; - Variable* var = scope_->NewVar("step_net"); + Variable* var = scope_.NewVar("step_net"); auto net = var->GetMutable(); // rnn/s is net's input or output? 
net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"}; @@ -164,7 +163,7 @@ protected: } // father scope - std::shared_ptr scope_; + Scope scope_; std::shared_ptr rnn_op_; }; @@ -191,66 +190,64 @@ protected: virtual void TearDown() override {} void CreateGlobalVariables() { - scope_ = std::make_shared(); // inputs: x LOG(INFO) << "create global variable x"; - Variable* x = scope_->NewVar("x"); + Variable* x = scope_.NewVar("x"); DDim dims = make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); // inputs: h_boot LOG(INFO) << "create global variable h_boot"; - Variable* h_boot = scope_->NewVar("h_boot"); + Variable* h_boot = scope_.NewVar("h_boot"); h_boot->GetMutable()->mutable_data( make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); // inputs: w LOG(INFO) << "create global variable w"; - Variable* w = scope_->NewVar("rnn/w"); + Variable* w = scope_.NewVar("rnn/w"); w->GetMutable()->mutable_data(make_ddim({30, 30}), platform::CPUPlace()); // inputs: h_grad LOG(INFO) << "create variable h_grad"; - Variable* dh = scope_->NewVar("h_grad"); + Variable* dh = scope_.NewVar("h_grad"); dh->GetMutable()->mutable_data(make_ddim({10, 20, 30}), platform::CPUPlace()); // inputs: step_scopes LOG(INFO) << "create variable step_scopes"; - scope_->NewVar("step_scopes"); + scope_.NewVar("step_scopes"); // inputs: step_net LOG(INFO) << "create variable step_net"; - scope_->NewVar("step_net"); + scope_.NewVar("step_net"); // outputs: w_grad LOG(INFO) << "create global variable w_grad"; - scope_->NewVar("rnn/w_grad"); + scope_.NewVar("rnn/w_grad"); // outputs: x_grad LOG(INFO) << "create global variable x_grad"; - scope_->NewVar("x_grad"); + scope_.NewVar("x_grad"); // outputs: h_boot_grad LOG(INFO) << "create global variable h_boot_grad"; - scope_->NewVar("h_boot_grad"); + scope_.NewVar("h_boot_grad"); } void CreateStepScopes() { - std::vector>* step_scopes = - scope_->FindVar("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); for (int i = 0; i < 10; ++i) { - auto scope = std::make_shared(scope_); - auto pre_t = scope->NewVar("rnn/pre_h")->GetMutable(); - pre_t->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); - auto tensor = scope->NewVar("rnn/h")->GetMutable(); - tensor->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + auto& scope = scope_.NewScope(); + auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable(); + pre_t->mutable_data({20, 30}, platform::CPUPlace()); + auto tensor = scope.NewVar("rnn/h")->GetMutable(); + tensor->mutable_data({20, 30}, platform::CPUPlace()); // for unit test of ConcatOutputs - auto xg = scope->NewVar("rnn/x_grad")->GetMutable(); - xg->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + auto xg = scope.NewVar("rnn/x_grad")->GetMutable(); + xg->mutable_data({20, 30}, platform::CPUPlace()); - step_scopes->push_back(scope); + step_scopes->emplace_back(&scope); } // last time step auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable(); - g->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + g->mutable_data({20, 30}, platform::CPUPlace()); } void CreateRNNGradientAlgorithm() { @@ -278,7 +275,7 @@ protected: void CreateStepNet() { LOG(INFO) << "create variable step_net"; - Variable* var = scope_->NewVar("step_net"); + Variable* var = scope_.NewVar("step_net"); auto net = var->GetMutable(); net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", "rnn/s_grad"}, @@ -298,9 +295,8 @@ protected: rnn::Link 
inlink; inlink.external = "x"; inlink.internal = "rnn/x"; - std::vector>* step_scopes = - scope_->FindVar("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); rnn::SegmentInputs(*step_scopes, std::vector{inlink}, 10); } @@ -312,15 +308,14 @@ protected: mem_attr.boot_var = "boot_h"; std::vector memories; memories.push_back(mem_attr); - std::vector>* step_scopes = - scope_->FindVar("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); for (int i = 1; i < 10; ++i) { rnn::LinkMemories(*step_scopes, memories, i, -1); } } - std::shared_ptr scope_; + Scope scope_; RecurrentGradientAlgorithm rnn_grad_algo_; }; @@ -339,14 +334,14 @@ TEST(RecurrentOp, LinkMemories) { // create and init step scopes int len = 10; - std::vector> step_scopes; + std::vector step_scopes; for (int i = 0; i < len; ++i) { - auto scope = std::make_shared(); + auto scope = new Scope(); scope->NewVar("pre_h"); auto tensor = scope->NewVar("h")->GetMutable(); - float* data = tensor->mutable_data(make_ddim({15, 20}), CPUPlace()); - for (int i = 0; i < 15 * 20; ++i) { - data[i] = rand() * (1. / (double)RAND_MAX); + float* data = tensor->mutable_data({15, 20}, CPUPlace()); + for (int j = 0; j < 15 * 20; ++j) { + data[j] = rand() * (1. / (double)RAND_MAX); } step_scopes.push_back(scope); } @@ -388,7 +383,17 @@ TEST(RecurrentOp, LinkMemories) { ASSERT_FLOAT_EQ(a[i], b[i]); } } + + for (auto s : step_scopes) { + delete s; + } } USE_OP(add_two); USE_OP(mul); + +// int main() { +// //! TODO(yuyang18): Temporary disable this unit-test because implementation +// //! error. +// return 0; +//} \ No newline at end of file diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 89a948e495e..ff69a0f8ab9 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include @@ -127,17 +128,6 @@ inline typename std::enable_if::type throw_on_error( #endif // PADDLE_ONLY_CPU -template -inline typename std::enable_if::value, void>::type -throw_on_error(T stat, const Args&... args) { - if (stat == nullptr) { - return; - } else { - throw std::runtime_error("Pointer value is nullptr: " + - string::Sprintf(args...)); - } -} - template inline void throw_on_error(T e) { throw_on_error(e, ""); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index eacec91cb29..ee5f675e251 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -102,11 +102,18 @@ All parameter, weight, gradient are variables in Paddle. }, py::return_value_policy::reference); - py::class_>(m, "Scope") - .def(py::init&>()) - .def("get_var", &pd::Scope::FindVar, py::return_value_policy::reference) - .def("create_var", &pd::Scope::NewVar, py::return_value_policy::reference) - .def("get_var_name", &pd::Scope::FindVarName); + py::class_(m, "Scope", "") + .def("new_var", + [](pd::Scope& self, const std::string& name) -> pd::Variable* { + return self.NewVar(name); + }, + py::return_value_policy::reference) + .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference) + .def(py::init<>()) + .def("new_scope", + [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); }, + py::return_value_policy::reference) + .def("drop_kids", &pd::Scope::DropKids); //! @note: Be careful! PyBind will return std::string as an unicode, not //! Python str. If you want a str object, you should cast them in Python. 
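The binding style used here follows from the new ownership model: Scope is neither copyable nor reference-counted any more, so every handle returned to Python is a borrowed pointer under py::return_value_policy::reference, owned by the enclosing C++ scope and invalidated by drop_kids. The same pattern in isolation, with a hypothetical module name, looks roughly like this:

#include <string>
#include <pybind11/pybind11.h>
#include "paddle/framework/scope.h"

namespace py = pybind11;
namespace pd = paddle::framework;

// Sketch only: methods that expose Scope/Variable hand Python a borrowed
// reference, because ownership stays with the parent Scope on the C++ side.
PYBIND11_PLUGIN(core_sketch) {
  py::module m("core_sketch", "sketch of the Scope binding pattern");
  py::class_<pd::Scope>(m, "Scope", "")
      .def(py::init<>())
      .def("new_var",
           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
      .def("new_scope",
           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &pd::Scope::DropKids);
  return m.ptr();
}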
diff --git a/python/paddle/v2/framework/default_scope_funcs.py b/python/paddle/v2/framework/default_scope_funcs.py index 4e772326c94..1b5580c8b30 100644 --- a/python/paddle/v2/framework/default_scope_funcs.py +++ b/python/paddle/v2/framework/default_scope_funcs.py @@ -5,7 +5,7 @@ Default scope function. thread-local stack of Scope. Top of that stack is current scope, the bottom of that stack is all scopes' parent. -Invoking `create_var/get_var` can `create/get` variable in current scope. +Invoking `new_var/find_var` can `new/find` variable in current scope. Invoking `enter_local_scope/leave_local_scope` can create or destroy local scope. @@ -19,8 +19,8 @@ import threading __tl_scope__ = threading.local() __all__ = [ - 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'create_var', - 'get_var', 'scoped_function' + 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var', + 'find_var', 'scoped_function' ] @@ -33,7 +33,7 @@ def get_cur_scope(): if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: - __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope(None)) + __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope()) return __tl_scope__.cur_scope[-1] @@ -42,7 +42,7 @@ def enter_local_scope(): Enter a new local scope """ cur_scope = get_cur_scope() - new_scope = paddle.v2.framework.core.Scope(cur_scope) + new_scope = cur_scope.new_scope() __tl_scope__.cur_scope.append(new_scope) @@ -51,20 +51,21 @@ def leave_local_scope(): Leave local scope """ __tl_scope__.cur_scope.pop() + get_cur_scope().drop_kids() -def create_var(name): +def new_var(name): """ create variable in current scope. """ - return get_cur_scope().create_var(name) + return get_cur_scope().new_var(name) -def get_var(name): +def find_var(name): """ get variable in current scope. 
""" - return get_cur_scope().get_var(name) + return get_cur_scope().find_var(name) def scoped_function(func): diff --git a/python/paddle/v2/framework/network.py b/python/paddle/v2/framework/network.py index c85e87413ef..cfeb0e3dec0 100644 --- a/python/paddle/v2/framework/network.py +++ b/python/paddle/v2/framework/network.py @@ -1,6 +1,6 @@ import paddle.v2.framework.core as core from paddle.v2.framework.create_op_creation_methods import op_creations -from default_scope_funcs import create_var, get_var, get_cur_scope +from default_scope_funcs import new_var, find_var, get_cur_scope __all__ = ['Network'] # Only expose Network @@ -29,12 +29,15 @@ class NetworkFunctor(object): if ipt in kwargs: var = kwargs[ipt] if isinstance(var, basestring): - var = create_var(var) + tmp = new_var(var) + self.net.var_names[tmp] = var + var = tmp + if not isinstance(var, core.Variable): raise TypeError( "Input of op creation must be string or variable") - kwargs[ipt] = get_cur_scope().get_var_name(var) + kwargs[ipt] = self.net.var_names[var] notemp_outputs = self.func.all_not_temp_output_args @@ -49,17 +52,20 @@ class NetworkFunctor(object): if opt in kwargs: var = kwargs[opt] if isinstance(var, basestring): - var = create_var(var) + tmp = new_var(var) + self.net.var_names[tmp] = var + var = tmp + if not isinstance(var, core.Variable): raise TypeError( "Output of op creation must be string or variable") - kwargs[opt] = get_cur_scope().get_var_name(var) + kwargs[opt] = self.net.var_names[var] op = self.func(**kwargs) self.net.net.add_op(op) - lst = [get_var(kwargs[opt]) for opt in notemp_outputs] + lst = [find_var(kwargs[opt]) for opt in notemp_outputs] if len(lst) == 1: return lst[0] elif len(lst) == 0: @@ -89,6 +95,7 @@ class Network(object): self.net = core.Net.create() funcs = (func_name for func_name in dir(op_creations) if not func_name.startswith("__")) + self.var_names = dict() # TODO(yuyang18): This code can work, but do not generate a good # docstring, try to give a better way generate function in runtime diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index 7b62313f8ac..99085c36722 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -24,13 +24,13 @@ class OpTestMeta(type): func = getattr(creation.op_creations, self.type, None) self.assertIsNotNone(func) - scope = core.Scope(None) + scope = core.Scope() kwargs = dict() for in_name in func.all_input_args: if hasattr(self, in_name): kwargs[in_name] = in_name - var = scope.create_var(in_name).get_tensor() + var = scope.new_var(in_name).get_tensor() arr = getattr(self, in_name) var.set_dims(arr.shape) var.set(arr) @@ -40,7 +40,7 @@ class OpTestMeta(type): for out_name in func.all_output_args: if hasattr(self, out_name): kwargs[out_name] = out_name - scope.create_var(out_name).get_tensor() + scope.new_var(out_name).get_tensor() for attr_name in func.all_attr_args: if hasattr(self, attr_name): @@ -54,7 +54,7 @@ class OpTestMeta(type): op.run(scope, ctx) for out_name in func.all_output_args: - actual = numpy.array(scope.get_var(out_name).get_tensor()) + actual = numpy.array(scope.find_var(out_name).get_tensor()) expect = getattr(self, out_name) # TODO(qijun) The default decimal is 7, but numpy.dot and eigen.mul # has some diff, and could not pass unittest. So I set decimal 3 here. 
diff --git a/python/paddle/v2/framework/tests/test_default_scope_funcs.py b/python/paddle/v2/framework/tests/test_default_scope_funcs.py index 81033deb154..495863c4562 100644 --- a/python/paddle/v2/framework/tests/test_default_scope_funcs.py +++ b/python/paddle/v2/framework/tests/test_default_scope_funcs.py @@ -7,19 +7,19 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertIsNotNone(get_cur_scope()) def test_none_variable(self): - self.assertIsNone(get_var("test")) + self.assertIsNone(find_var("test")) def test_create_var_get_var(self): - var_a = create_var("var_a") + var_a = new_var("var_a") self.assertIsNotNone(var_a) - self.assertIsNotNone(get_cur_scope().get_var('var_a')) + self.assertIsNotNone(get_cur_scope().find_var('var_a')) enter_local_scope() - self.assertIsNotNone(get_cur_scope().get_var('var_a')) + self.assertIsNotNone(get_cur_scope().find_var('var_a')) leave_local_scope() def test_var_get_int(self): def __new_scope__(): - i = create_var("var_i") + i = new_var("var_i") self.assertFalse(i.is_int()) i.set_int(10) self.assertTrue(i.is_int()) diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 59e7e61249e..43931aac406 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -6,13 +6,13 @@ import paddle.v2.framework.create_op_creation_methods as creation class TestFc(unittest.TestCase): def test_fc(self): - scope = core.Scope(None) - x = scope.create_var("X") + scope = core.Scope() + x = scope.new_var("X") x_tensor = x.get_tensor() x_tensor.set_dims([1000, 784]) x_tensor.alloc_float() - w = scope.create_var("W") + w = scope.new_var("W") w_tensor = w.get_tensor() w_tensor.set_dims([784, 100]) w_tensor.alloc_float() @@ -25,10 +25,10 @@ class TestFc(unittest.TestCase): op = creation.op_creations.fc(X="X", Y="Y", W="W") for out in op.outputs(): - if scope.get_var(out) is None: - scope.create_var(out).get_tensor() + if scope.find_var(out) is None: + scope.new_var(out).get_tensor() - tensor = scope.get_var("Y").get_tensor() + tensor = scope.find_var("Y").get_tensor() op.infer_shape(scope) self.assertEqual([1000, 100], tensor.shape()) diff --git a/python/paddle/v2/framework/tests/test_scope.py b/python/paddle/v2/framework/tests/test_scope.py index f0ee45cfc75..1ce9454067f 100644 --- a/python/paddle/v2/framework/tests/test_scope.py +++ b/python/paddle/v2/framework/tests/test_scope.py @@ -5,29 +5,29 @@ import unittest class TestScope(unittest.TestCase): def test_create_destroy(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) + scope = paddle_c.Scope() self.assertIsNotNone(scope) - scope_with_parent = paddle_c.Scope(scope) + scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) - self.assertIsNone(scope.get_var("test")) + scope = paddle_c.Scope() + self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) - var_a = scope.create_var("var_a") + scope = paddle_c.Scope() + var_a = scope.new_var("var_a") self.assertIsNotNone(var_a) - self.assertIsNotNone(scope.get_var('var_a')) - scope2 = paddle_c.Scope(scope) - self.assertIsNotNone(scope2.get_var('var_a')) + self.assertIsNotNone(scope.find_var('var_a')) + scope2 = scope.new_scope() + self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): paddle_c = 
paddle.v2.framework.core - scope = paddle_c.Scope(None) - var = scope.create_var("test_int") + scope = paddle_c.Scope() + var = scope.new_var("test_int") var.set_int(10) self.assertTrue(var.is_int()) self.assertEqual(10, var.get_int()) diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index b72aff3b9cd..6d59863cea2 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -5,8 +5,8 @@ import numpy class TestScope(unittest.TestCase): def test_int_tensor(self): - scope = core.Scope(None) - var = scope.create_var("test_tensor") + scope = core.Scope() + var = scope.new_var("test_tensor") tensor = var.get_tensor() tensor.set_dims([1000, 784]) @@ -23,8 +23,8 @@ class TestScope(unittest.TestCase): self.assertEqual(2.0, tensor_array_2[19, 11]) def test_float_tensor(self): - scope = core.Scope(None) - var = scope.create_var("test_tensor") + scope = core.Scope() + var = scope.new_var("test_tensor") tensor = var.get_tensor() tensor.set_dims([1000, 784]) -- GitLab
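Putting the pieces together, the step-scope bookkeeping that the recurrent op relies on reduces to the following pattern; this is a sketch under the new API, not code from the patch, and the variable names and sequence length are placeholders:

#include <vector>
#include "paddle/framework/scope.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

int main() {
  Scope scope;                                   // the RNN op's outer scope
  Variable* var = scope.NewVar("step_scopes");
  auto* step_scopes = var->GetMutable<std::vector<Scope*>>();

  const size_t seq_len = 10;                     // placeholder sequence length
  for (size_t i = step_scopes->size(); i < seq_len; ++i) {
    Scope& step_scope = scope.NewScope();        // owned by `scope`, no shared_ptr
    step_scope.NewVar("h");                      // per-step state lives here
    step_scopes->push_back(&step_scope);         // store a non-owning handle
  }

  // ... run the step net against *(*step_scopes)[i] for each time step ...

  scope.DropKids();                              // frees every step scope at once
  step_scopes->clear();                          // the raw handles are now dangling
  return 0;
}

Storing raw Scope* rather than shared_ptr is safe here because every step scope's lifetime is tied to the outer scope that created it.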