diff --git a/doc/design/scope.md b/doc/design/scope.md index afe6bc028cafc5ee24b0041905857af58d3f5790..c9e0be716b606f6c7bf0373e0c6e632647e07a6f 100644 --- a/doc/design/scope.md +++ b/doc/design/scope.md @@ -37,8 +37,8 @@ Scope is an association of a name to variable. All variables belong to `Scope`. ```cpp class Scope { public: - Variable* CreateVariable(const std::string& name); - const Variable* GetVariable(const std::string& name) const; + Variable* NewVar(const std::string& name); + const Variable* FindVar(const std::string& name) const; private: std::unordered_map<std::string, std::unique_ptr<Variable>> vars_; @@ -58,12 +58,12 @@ class Scope { public: Scope(const std::shared_ptr<Scope>& scope): parent_(scope) {} - Variable* GetVariable(const std::string& name) const { + Variable* FindVar(const std::string& name) const { auto it = vars_.find(name); if (it != vars_.end()) { return it->second.get(); } else if (parent_ != nullptr) { - return parent_->GetVariable(name); + return parent_->FindVar(name); } else { return nullptr; } @@ -95,10 +95,10 @@ class Scope { static std::shared_ptr<Scope> Create(const std::shared_ptr<Scope>& parent = nullptr); // return nullptr if not found. - Variable* GetVariable(const std::string& name) const; + Variable* FindVar(const std::string& name) const; // return if already contains same name variable. - Variable* CreateVariable(const std::string& name); + Variable* NewVar(const std::string& name); private: std::shared_ptr<Scope> parent_; @@ -107,11 +107,11 @@ class Scope { ``` ## Only scope can create a variable -To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `CreateVariable` can construct `Variable`. +To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function and make `Scope` a friend class of `Variable`; then only `NewVar` can construct a `Variable`. ## When scope destroyed, all variables inside this scope should be destroyed together -The scope hold unique pointers for all variables. User can `GetVariable` from scope, but he should not hold this pointer as a member variable. Because when scope is destroyed, all variables inside this scope will be destroyed together. +The scope holds unique pointers to all of its variables. A user can `FindVar` a variable from a scope, but should not keep the returned pointer as a member variable, because when the scope is destroyed, all variables inside it are destroyed together. ## Sharing a parent scope @@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar ## Orthogonal interface -`GetVariable` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `CreateVariable` will return a `Error` when there is a name conflict locally. Combine `GetVariable` and `CreateVariable`, we can implement `CreateOrGetVariable` easily. +`FindVar` returns `nullptr` when `name` is not found, so it can also serve as a `Contains` check. `NewVar` returns an `Error` when there is a local name conflict. Combining `FindVar` and `NewVar`, we can easily implement a find-or-create helper. 
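To make the renamed interface concrete before the framework changes below, here is a minimal usage sketch based on the `Scope` API this patch introduces in `paddle/framework/scope.h` (`NewVar`, `FindVar`, `NewScope`, `DropKids`); `ScopeUsageSketch` is a hypothetical helper written only for illustration, not part of the patch.

```cpp
#include <cassert>

#include "paddle/framework/scope.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

// Hypothetical caller: create a variable in a parent scope and look it
// up from a child scope created with NewScope().
void ScopeUsageSketch() {
  Scope global;                      // root scope; it owns its variables
  Variable* w = global.NewVar("w");  // creates "w", or returns the existing one
  Scope& step = global.NewScope();   // child scope owned by `global`
  assert(step.FindVar("w") == w);    // FindVar also searches ancestor scopes
  assert(global.FindVar("missing") == nullptr);  // doubles as a Contains check
  global.DropKids();                 // destroys `step` and everything in it
}
```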
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 21cb7c7265e0052630b68954fa25f9189e641e7b..9d172640493d3c884bb5c142b37829eafe9ee928 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -8,7 +8,9 @@ cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_test(variable_test SRCS variable_test.cc) -cc_test(scope_test SRCS scope_test.cc) + +cc_library(scope SRCS scope.cc) +cc_test(scope_test SRCS scope_test.cc DEPS scope) proto_library(attr_type SRCS attr_type.proto) proto_library(op_proto SRCS op_proto.proto DEPS attr_type) @@ -16,7 +18,7 @@ proto_library(op_desc SRCS op_desc.proto DEPS attr_type) cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf) cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf) -cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor) +cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS op_proto operator) diff --git a/paddle/framework/eigen.h b/paddle/framework/eigen.h index 5f3358c69b3fbbbfcd97a96ab50fde3d8b9efad0..a4667cc51fadfc020d3211b7a82356db386fced1 100644 --- a/paddle/framework/eigen.h +++ b/paddle/framework/eigen.h @@ -80,5 +80,21 @@ struct EigenVector : public EigenTensor { } }; +template +struct EigenScalar { + // Scalar tensor (implemented as a rank-0 tensor) of scalar type T. + using Type = Eigen::TensorMap< + Eigen::TensorFixedSize, MajorType, IndexType>>; + using ConstType = Eigen::TensorMap< + Eigen::TensorFixedSize, MajorType, IndexType>>; + + static Type From(Tensor& tensor) { return Type(tensor.data()); } + + static ConstType From(const Tensor& tensor) { + return ConstType(tensor.data()); + } +}; + } // namespace framework } // namespace paddle diff --git a/paddle/framework/eigen_test.cc b/paddle/framework/eigen_test.cc index a9fa728e49a0dcc781e520a22c1ee5f921c4c733..dc1957691b1a202826e10e84c21ac8874df9e378 100644 --- a/paddle/framework/eigen_test.cc +++ b/paddle/framework/eigen_test.cc @@ -46,6 +46,17 @@ TEST(Eigen, Tensor) { } } +TEST(Eigen, ScalarFrom) { + Tensor t; + int* p = t.mutable_data(make_ddim({1}), platform::CPUPlace()); + *p = static_cast(100); + + EigenScalar::Type es = EigenScalar::From(t); + + ASSERT_EQ(0, es.dimension(0)); + ASSERT_EQ(100, es(0)); +} + TEST(Eigen, VectorFrom) { Tensor t; float* p = t.mutable_data(make_ddim({6}), platform::CPUPlace()); diff --git a/paddle/framework/net.h b/paddle/framework/net.h index 089c1355951f59d51db16d4b4bdce4282d6e5c25..fc98080b178bbe2af794e7a9387802f8ed764712 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -43,7 +43,7 @@ class NetOp : public OperatorBase { * Infer all the operators' input and output variables' shapes, will be called * before every mini-batch */ - void InferShape(const std::shared_ptr& scope) const override { + void InferShape(const Scope& scope) const override { for (auto& op : ops_) { op->InferShape(scope); } @@ -56,7 +56,7 @@ class NetOp : public OperatorBase { * scope will be used instead. If no OpContext is provicded, default context * will be used. 
*/ - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { for (auto& op : ops_) { op->Run(scope, dev_ctx); diff --git a/paddle/framework/net_op_test.cc b/paddle/framework/net_op_test.cc index 44dea97ef0eeee28b717aed8bf1c5008fd6f3738..3392b033c50da51f1912437e0c45bef62c0d11d4 100644 --- a/paddle/framework/net_op_test.cc +++ b/paddle/framework/net_op_test.cc @@ -16,10 +16,10 @@ static int run_cnt = 0; class TestOp : public OperatorBase { public: - void InferShape(const std::shared_ptr& scope) const override { + void InferShape(const framework::Scope& scope) const override { ++infer_shape_cnt; } - void Run(const std::shared_ptr& scope, + void Run(const framework::Scope& scope, const paddle::platform::DeviceContext& dev_ctx) const override { ++run_cnt; } @@ -61,7 +61,7 @@ TEST(OpKernel, all) { ASSERT_EQ(1UL, tmp_idx.size()); ASSERT_EQ("y", net->outputs_[tmp_idx[0]]); - auto scope = std::make_shared(); + Scope scope; platform::CPUDeviceContext dev_ctx; net->InferShape(scope); diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 2ef781bf8672c8aa53ae32a44f1ea61973f3792c..d8ae3d07227f1dfab05a98adfc24072aca41ec4d 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -7,9 +7,9 @@ namespace paddle { namespace framework { class CosineOp : public OperatorBase { public: - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override {} - void InferShape(const std::shared_ptr& scope) const override {} + void InferShape(const Scope& scope) const override {} }; class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { @@ -27,8 +27,8 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class MyTestOp : public OperatorBase { public: - void InferShape(const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override {} }; @@ -69,7 +69,7 @@ TEST(OpRegistry, CreateOp) { std::shared_ptr op = paddle::framework::OpRegistry::CreateOp(op_desc); - auto scope = std::make_shared(); + paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); float scale_get = op->GetAttr("scale"); @@ -111,7 +111,7 @@ TEST(OpRegistry, DefaultValue) { std::shared_ptr op = paddle::framework::OpRegistry::CreateOp(op_desc); - auto scope = std::make_shared(); + paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); ASSERT_EQ(op->GetAttr("scale"), 1.0); @@ -173,7 +173,7 @@ TEST(OpRegistry, CustomChecker) { SetInputFormat(&op_desc); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::platform::CPUDeviceContext dev_ctx; - auto scope = std::make_shared(); + paddle::framework::Scope scope; op->Run(scope, dev_ctx); int test_attr = op->GetAttr("test_attr"); ASSERT_EQ(test_attr, 4); diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index ff518265a45382115b53d61cab52b85a73de5f70..45155fae4c8f2903a017cb3478a591ac80beed55 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -71,10 +71,10 @@ class OperatorBase { /// InferShape infer the size of Variables used by this Operator with /// information inside scope - virtual void InferShape(const std::shared_ptr& scope) const = 0; + virtual void 
InferShape(const Scope& scope) const = 0; /// Net will call this function to Run an op. - virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const = 0; virtual bool IsNetOp() const { return false; } @@ -101,7 +101,7 @@ class OperatorBase { class OperatorContext { public: - OperatorContext(const OperatorBase* op, const std::shared_ptr& scope) + OperatorContext(const OperatorBase* op, const Scope& scope) : op_(*op), scope_(scope) {} size_t InputSize() const { return op_.inputs_.size(); } @@ -109,19 +109,19 @@ class OperatorContext { size_t OutputSize() const { return op_.outputs_.size(); } const Variable* InputVar(const size_t index) const { - return scope_->GetVariable(op_.inputs_.at(index)); + return scope_.FindVar(op_.inputs_.at(index)); } Variable* OutputVar(const size_t index) const { - return scope_->GetVariable(op_.outputs_.at(index)); + return scope_.FindVar(op_.outputs_.at(index)); } const Variable* InputVar(const std::string& name) const { - return scope_->GetVariable(op_.Input(name)); + return scope_.FindVar(op_.Input(name)); } Variable* OutputVar(const std::string& name) const { - return scope_->GetVariable(op_.Output(name)); + return scope_.FindVar(op_.Output(name)); } const std::vector MultiInputVar( @@ -131,7 +131,7 @@ class OperatorContext { res.reserve(names.size()); std::transform( names.begin(), names.end(), std::back_inserter(res), - [this](const std::string& name) { return scope_->GetVariable(name); }); + [this](const std::string& name) { return scope_.FindVar(name); }); return res; } @@ -141,7 +141,7 @@ class OperatorContext { res.reserve(names.size()); std::transform( names.begin(), names.end(), std::back_inserter(res), - [this](const std::string& name) { return scope_->GetVariable(name); }); + [this](const std::string& name) { return scope_.FindVar(name); }); return res; } @@ -180,7 +180,7 @@ class OperatorContext { res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { - auto var = scope_->GetVariable(sub_name); + auto var = scope_.FindVar(sub_name); PADDLE_ENFORCE(var != nullptr, "MultiInput(%s:%s) should not be nullptr", name, sub_name); @@ -196,7 +196,7 @@ class OperatorContext { res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { - auto var = scope_->GetVariable(sub_name); + auto var = scope_.FindVar(sub_name); PADDLE_ENFORCE(var != nullptr, "MultiOutput(%s:%s) should not be nullptr", name, sub_name); @@ -206,12 +206,12 @@ class OperatorContext { } const OperatorBase& op_; - const std::shared_ptr& scope_; + const Scope& scope_; }; class InferShapeContext : public OperatorContext { public: - InferShapeContext(const OperatorBase* op, const std::shared_ptr& scope) + InferShapeContext(const OperatorBase* op, const Scope& scope) : OperatorContext(op, scope) {} }; @@ -232,7 +232,7 @@ struct EigenDeviceConverter { class ExecutionContext : public OperatorContext { public: - ExecutionContext(const OperatorBase* op, const std::shared_ptr& scope, + ExecutionContext(const OperatorBase* op, const Scope& scope, const platform::DeviceContext& device_context) : OperatorContext(op, scope), device_context_(device_context) {} @@ -285,11 +285,11 @@ class OperatorWithKernel : public OperatorBase { using OpKernelMap = std::unordered_map, OpKernelHash>; - void InferShape(const std::shared_ptr& scope) const { + void InferShape(const Scope& scope) const { 
InferShape(InferShapeContext(this, scope)); } - void Run(const std::shared_ptr& scope, + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const final { auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx)); opKernel->Compute(ExecutionContext(this, scope, dev_ctx)); diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index daa3645b4d7588baffc57491d0a7f7f6368eda7b..401bce0fbbc953afedf5f352f9d3ae8bedab7cbe 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -24,16 +24,15 @@ static int op_run_num = 0; class OpWithoutKernelTest : public OperatorBase { public: void Init() override { x = 1; } - void InferShape( - const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { op_run_num++; ASSERT_EQ((int)inputs_.size(), 1); ASSERT_EQ((int)outputs_.size(), 1); - ASSERT_EQ(scope->GetVariable(inputs_[0]), nullptr); + ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(x, 1); - ASSERT_NE(scope->GetVariable(outputs_[0]), nullptr); + ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); } public: @@ -69,10 +68,10 @@ TEST(OperatorBase, all) { attr->set_f(3.14); paddle::platform::CPUDeviceContext device_context; - auto scope = std::make_shared(); + paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - scope->CreateVariable("OUT1"); + scope.NewVar("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); op->InferShape(scope); op->Run(scope, device_context); @@ -118,13 +117,12 @@ class CPUKernelTest : public OpKernel { class OperatorMultiInputsTest : public OperatorBase { public: void Init() override { x = 1; } - void InferShape( - const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { - ASSERT_EQ(scope->GetVariable(inputs_[0]), nullptr); + ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr); ASSERT_EQ(x, 1); - ASSERT_NE(scope->GetVariable(outputs_[0]), nullptr); + ASSERT_NE(scope.FindVar(outputs_[0]), nullptr); ASSERT_EQ(Input("x"), "IN1"); ASSERT_EQ(Input("y"), "OUT1"); } @@ -206,7 +204,7 @@ TEST(OpKernel, all) { attr->set_f(3.14); paddle::platform::CPUDeviceContext cpu_device_context; - auto scope = std::make_shared(); + paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0); @@ -252,13 +250,13 @@ TEST(OpKernel, multi_inputs) { output_format->Add(2); // y1 paddle::platform::CPUDeviceContext cpu_device_context; - auto scope = std::make_shared(); - scope->CreateVariable("x0")->GetMutable(); - scope->CreateVariable("x1")->GetMutable(); - scope->CreateVariable("x2")->GetMutable(); - scope->CreateVariable("k0")->GetMutable(); - scope->CreateVariable("y0")->GetMutable(); - scope->CreateVariable("y1")->GetMutable(); + paddle::framework::Scope scope; + scope.NewVar("x0")->GetMutable(); + scope.NewVar("x1")->GetMutable(); + scope.NewVar("x2")->GetMutable(); + scope.NewVar("k0")->GetMutable(); + scope.NewVar("y0")->GetMutable(); + scope.NewVar("y1")->GetMutable(); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_device_context); diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc new file mode 100644 index 
0000000000000000000000000000000000000000..080b4ac621c1b8c0d4b4e7b26f394cf2be263894 --- /dev/null +++ b/paddle/framework/scope.cc @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/scope.h" +#include "paddle/string/printf.h" + +namespace paddle { +namespace framework { + +Scope::~Scope() { + DropKids(); + for (auto& kv : vars_) delete kv.second; +} + +Scope& Scope::NewScope() const { + kids_.push_back(new Scope(this)); + return *kids_.back(); +} + +Variable* Scope::NewVar(const std::string& name) { + auto iter = vars_.find(name); + if (iter != vars_.end()) { + return iter->second; + } + Variable* v = new Variable(); + vars_[name] = v; + v->name_ = &(vars_.find(name)->first); + return v; +} + +Variable* Scope::NewVar() { + return NewVar(string::Sprintf("%p.%d", this, vars_.size())); +} + +Variable* Scope::FindVar(const std::string& name) const { + auto it = vars_.find(name); + if (it != vars_.end()) return it->second; + return (parent_ == nullptr) ? nullptr : parent_->FindVar(name); +} + +const Scope* Scope::FindScope(const Variable* var) const { + for (auto& kv : vars_) { + if (kv.second == var) { + return this; + } + } + return (parent_ == nullptr) ? nullptr : parent_->FindScope(var); +} +void Scope::DropKids() { + for (Scope* s : kids_) delete s; + kids_.clear(); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 4faaf841440ba30b79c83d09fea977186bd0270a..2ba3f8ed355b48800cfa4180e4e8a94f2c9958a9 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once +#include #include #include -#include #include "paddle/framework/variable.h" @@ -35,73 +35,42 @@ class Scope; */ class Scope { public: - /** - * @brief Initialize s Scope without parent. - */ Scope() {} + ~Scope(); - /** - * @brief Initialize a Scope with parent. - */ - explicit Scope(const std::shared_ptr& parent) : parent_(parent) {} - - /** - * @brief Create Variable - * - * Create Variable in this Scope. Return the exist one if Variable already - * been created. - */ - Variable* CreateVariable(const std::string& name) { - auto var = GetVariable(name); - if (var) { - return var; - } else { - auto ptr = new Variable(); - name_to_var_[name] = std::unique_ptr(ptr); - var_to_name_[ptr] = name; - return GetVariable(name); - } - } - - /** - * @brief Get Variable. - * - * Get Variable from this Scope, this function will recursive find Variable - * from it's parent scope. Return nullptr if not found. - */ - Variable* GetVariable(const std::string& name) const { - auto it = name_to_var_.find(name); - if (it != name_to_var_.end()) { - return it->second.get(); - } else if (parent_ != nullptr) { - return parent_->GetVariable(name); - } else { - return nullptr; - } - } - - /** - * @brief If this scope has a Var named name. 
- * - * Find if there is a Variable in this scope and it's parent scope - */ - bool HasVariable(const std::string& name) const { - return (name_to_var_.find(name) != name_to_var_.end() || - (parent_ && parent_->HasVariable(name))); - } - - std::string GetVariableName(Variable* const var) const { - try { - return var_to_name_.at(var); - } catch (...) { - return ""; - } - } + // Disable Copy, Assign, Move. + Scope(const Scope& other) = delete; + Scope& operator=(const Scope& other) = delete; + Scope(Scope&& other) = delete; + + /// Create a sub-scope. Returns a reference other than a pointer so + /// to prevent from manual deletion. + /// Mark it to const because that new kid scope cannot change parent scope. + Scope& NewScope() const; + + /// Create a variable with given name if it doesn't exist. + Variable* NewVar(const std::string& name); + + /// Create a variable with a scope-unique name. + Variable* NewVar(); + + /// Find a variable in the scope or any of its ancestors. Returns + /// nullptr if cannot find. + Variable* FindVar(const std::string& name) const; + + /// Find the scope or an ancestor scope that contains the given variable. + const Scope* FindScope(const Variable* var) const; + + /// Drop all kids scopes belonged to this scope. + void DropKids(); private: - std::unordered_map var_to_name_; - std::unordered_map> name_to_var_; - std::shared_ptr parent_{nullptr}; + // Call Scope::NewScope for a sub-scope. + explicit Scope(Scope const* parent) : parent_(parent) {} + + std::unordered_map vars_; + mutable std::list kids_; + Scope const* parent_{nullptr}; }; } // namespace framework diff --git a/paddle/framework/scope_test.cc b/paddle/framework/scope_test.cc index ff069c7be002e9bcfd63225c3d80aa958935ba14..9d51e355b0f6336d2f875ff2d77266b261baf5ac 100644 --- a/paddle/framework/scope_test.cc +++ b/paddle/framework/scope_test.cc @@ -15,49 +15,42 @@ limitations under the License. */ #include "paddle/framework/scope.h" #include "gtest/gtest.h" -TEST(Scope, Create) { - using paddle::framework::Scope; - using paddle::framework::Variable; +using paddle::framework::Scope; +using paddle::framework::Variable; - auto scope = std::make_shared(); +TEST(Scope, VarsShadowing) { + Scope s; + Scope& ss1 = s.NewScope(); + Scope& ss2 = s.NewScope(); - Variable* var0 = scope->CreateVariable(""); - EXPECT_NE(var0, nullptr); + Variable* v0 = s.NewVar("a"); + Variable* v1 = ss1.NewVar("a"); - /// GetVariable will return nullptr if not exist. - Variable* var1 = scope->GetVariable("a"); - EXPECT_EQ(var1, nullptr); + EXPECT_NE(v0, v1); - /// CreateVariable will return one. - Variable* var2 = scope->CreateVariable("a"); - EXPECT_NE(var2, nullptr); - - /// Get the created variable. - Variable* var3 = scope->GetVariable("a"); - EXPECT_EQ(var2, var3); + EXPECT_EQ(v0, s.FindVar("a")); + EXPECT_EQ(v1, ss1.FindVar("a")); + EXPECT_EQ(v0, ss2.FindVar("a")); +} - /// CreateVariable will just return the variable if it's - /// already exist. 
- Variable* var4 = scope->CreateVariable("a"); - EXPECT_EQ(var4, var2); +TEST(Scope, FindVar) { + Scope s; + Scope& ss = s.NewScope(); - EXPECT_EQ("a", scope->GetVariableName(var4)); - Scope scope2; - auto var = scope2.CreateVariable("tmp"); - EXPECT_EQ("", scope->GetVariableName(var)); -} + EXPECT_EQ(nullptr, s.FindVar("a")); + EXPECT_EQ(nullptr, ss.FindVar("a")); -TEST(Scope, Parent) { - using paddle::framework::Scope; - using paddle::framework::Variable; + ss.NewVar("a"); - auto parent_scope = std::make_shared(); - auto scope = std::make_shared(parent_scope); + EXPECT_EQ(nullptr, s.FindVar("a")); + EXPECT_NE(nullptr, ss.FindVar("a")); +} - Variable* var0 = parent_scope->CreateVariable("a"); - EXPECT_NE(var0, nullptr); +TEST(Scope, FindScope) { + Scope s; + Scope& ss = s.NewScope(); + Variable* v = s.NewVar("a"); - /// GetVariable will get Variable from parent scope if exist. - Variable* var1 = scope->GetVariable("a"); - EXPECT_EQ(var0, var1); + EXPECT_EQ(&s, s.FindScope(v)); + EXPECT_EQ(&s, ss.FindScope(v)); } diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index 72c4a7a2a1d1cf93a784f24e687727ee8481484c..38fc2720a3023039aa113b32a394bda9c5def4c0 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -16,7 +16,7 @@ #include #include -#include "paddle/platform/assert.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace framework { @@ -25,7 +25,7 @@ class Variable { public: template const T& Get() const { - PADDLE_ASSERT(IsType()); + PADDLE_ENFORCE(IsType(), "Variable must be type %s", typeid(T).name()); return *static_cast(holder_->Ptr()); } @@ -65,6 +65,17 @@ class Variable { std::unique_ptr holder_; // pointers to a PlaceholderImpl object indeed. + + // name_ is only meaningful with a Scope and accessible by it. + // + // NOTE: Please don't expose name_ by adding methods like + // Variable::Name or Scope::VarName! A variable could have a human + // readable name or an auto-generated scope-unique name. In the + // former case, the caller knows the name and doesn't need to access + // the name; in the latter case, the variable should be identified + // by its address but not the unreadable name. 
+ friend class Scope; + const std::string* name_; }; } // namespace framework diff --git a/paddle/operators/recurrent_network_op.cc b/paddle/operators/recurrent_network_op.cc index 1a101d6ddf149d608dbdbe048ef43d86bacbcc16..224bb1432ac93815b05769f1cc14b45709c436b8 100644 --- a/paddle/operators/recurrent_network_op.cc +++ b/paddle/operators/recurrent_network_op.cc @@ -27,38 +27,37 @@ namespace operators { namespace rnn { -void SegmentInputs(std::vector>& step_scopes, +void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len) { PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided."); for (size_t i = 0; i < inlinks.size(); ++i) { Tensor* input = - step_scopes[0]->GetVariable(inlinks[i].external)->GetMutable(); + step_scopes[0]->FindVar(inlinks[i].external)->GetMutable(); DDim dims = input->dims(); PADDLE_ENFORCE(static_cast(dims[0]) == seq_len, "all the inlinks must have same length"); DDim step_dims = slice_ddim(dims, 1, dims.size()); for (size_t j = 0; j < seq_len; j++) { - Tensor* step_input = step_scopes[j] - ->CreateVariable(inlinks[i].internal) - ->GetMutable(); + Tensor* step_input = + step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable(); *step_input = input->Slice(j, j + 1); step_input->Resize(step_dims); } } } -void ConcatOutputs(std::vector>& step_scopes, +void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, const size_t seq_len) { for (size_t i = 0; i < outlinks.size(); i++) { Tensor* output = - step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable(); + step_scopes[0]->FindVar(outlinks[i].external)->GetMutable(); // TODO(qingiqng) remove following code after adding // InferShape in RecurrentGradientOp DDim step_dims = step_scopes[0] - ->GetVariable(outlinks[i].internal) + ->FindVar(outlinks[i].internal) ->GetMutable() ->dims(); std::vector dims_vec = vectorize(step_dims); @@ -66,9 +65,8 @@ void ConcatOutputs(std::vector>& step_scopes, output->mutable_data(make_ddim(dims_vec), platform::CPUPlace()); for (size_t j = 0; j < seq_len; j++) { - Tensor* step_output = step_scopes[j] - ->GetVariable(outlinks[i].internal) - ->GetMutable(); + Tensor* step_output = + step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable(); // TODO(luotao02) data type and platform::DeviceContext() should set // correctly (output->Slice(j, j + 1)) @@ -77,7 +75,7 @@ void ConcatOutputs(std::vector>& step_scopes, } } -void LinkMemories(std::vector>& scopes, +void LinkMemories(const std::vector& scopes, const std::vector& memories, size_t step_id, int offset) { @@ -94,17 +92,17 @@ void LinkMemories(std::vector>& scopes, offset, scopes.size(), step_id); - std::shared_ptr scope = scopes[step_id]; - std::shared_ptr linked_scope = scopes[step_id + offset]; + auto scope = scopes[step_id]; + auto linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { - auto mem = scope->CreateVariable(attr.pre_var)->GetMutable(); + auto mem = scope->NewVar(attr.pre_var)->GetMutable(); // maybe share variable is better? - auto linked_mem = linked_scope->GetVariable(attr.var)->GetMutable(); + auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); mem->ShareDataWith(*linked_mem); // TODO(qingqing) remove following code // the memory of current step should be allocated in step net - auto m = scope->CreateVariable(attr.var)->GetMutable(); + auto m = scope->NewVar(attr.var)->GetMutable(); // for unit test, as addOp and mulOp are null currently, if not // mutable_data, mem.data() in output will be error. 
We will // remove this line after merge the correct addOp and mulOp. @@ -171,8 +169,8 @@ void InitArgument(const ArgumentName& name, } // namespace rnn -void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { - seq_len_ = scope->GetVariable((arg_->inlinks[0]).external) +void RecurrentAlgorithm::InferShape(const Scope& scope) const { + seq_len_ = scope.FindVar((arg_->inlinks[0]).external) ->GetMutable() ->dims()[0]; CreateScopes(scope); @@ -187,10 +185,10 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { InitMemories(step_scopes[0]); - PADDLE_ENFORCE(scope->HasVariable(arg_->step_net), + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, "stepnet [%s] is not in scope.", arg_->step_net); - Variable* net = scope->GetVariable(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); // If the InferShape is called in OperatorBase's run function, // the rnn op only needs to do InferShape for the first time step @@ -198,82 +196,79 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr& scope) const { if (i > 0) { rnn::LinkMemories(step_scopes, arg_->memories, i, -1); } - net->GetMutable()->InferShape(step_scopes[i]); + net->GetMutable()->InferShape(*step_scopes[i]); } auto outlinks = arg_->outlinks; for (size_t i = 0; i < outlinks.size(); i++) { DDim step_dims = step_scopes[0] - ->GetVariable(outlinks[i].internal) + ->FindVar(outlinks[i].internal) ->GetMutable() ->dims(); std::vector dims_vec = vectorize(step_dims); // now only support fixed length dims_vec.insert(dims_vec.begin(), seq_len_); Tensor* output = - step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable(); + step_scopes[0]->FindVar(outlinks[i].external)->GetMutable(); output->Resize(make_ddim(dims_vec)); } } -void RecurrentAlgorithm::Run(const std::shared_ptr& scope, +void RecurrentAlgorithm::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto step_scopes = GetStepScopes(scope); - Variable* net = scope->GetVariable(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); for (size_t step_id = 0; step_id < seq_len_; step_id++) { // the link memory is done in InferShape // maybe remove following code after testing if (step_id > 0) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1); } - net->GetMutable()->Run(step_scopes[step_id], dev_ctx); + net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); } -void RecurrentAlgorithm::CreateScopes(std::shared_ptr scope) const { +void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { // TODO(xxx) Only two scopes are needed for inference, this case will be // supported later. - auto step_scopes = scope->GetVariable(arg_->step_scopes) - ->GetMutable>>(); + auto step_scopes = + scope.FindVar(arg_->step_scopes)->GetMutable>(); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { - std::shared_ptr step_scope = std::make_shared(scope); + auto& step_scope = scope.NewScope(); // Now all variables in scope must be created outside of op. 
- auto net_op = scope->GetVariable(arg_->step_net)->GetMutable(); + auto net_op = scope.FindVar(arg_->step_net)->GetMutable(); for (auto& input : net_op->inputs_) { - step_scope->CreateVariable(input); + if (!step_scope.FindVar(input)) step_scope.NewVar(input); } for (auto& output : net_op->outputs_) { - step_scope->CreateVariable(output); + step_scope.NewVar(output); } - step_scopes->push_back(std::make_shared(step_scope)); + step_scopes->emplace_back(&step_scope); } } } -void RecurrentAlgorithm::InitMemories(std::shared_ptr step_scope) const { +void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { for (auto& attr : arg_->memories) { - Tensor* pre_mem = - step_scope->CreateVariable(attr.pre_var)->GetMutable(); - PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var), + Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable(); + PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); - Tensor* boot_mem = - step_scope->GetVariable(attr.boot_var)->GetMutable(); + Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable(); pre_mem->ShareDataWith(*boot_mem); // TODO(qingqing) remove following code // the memory of current step should be allocated in step net // here for unit test - auto cur_step_mem = - step_scope->CreateVariable(attr.var)->GetMutable(); + auto cur_step_mem = step_scope->NewVar(attr.var)->GetMutable(); cur_step_mem->mutable_data(boot_mem->dims(), platform::CPUPlace()); } } @@ -333,72 +328,69 @@ public: }; void RecurrentGradientAlgorithm::Run( - const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const { + const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); - PADDLE_ENFORCE(scope->HasVariable(arg_->step_net), + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, "step net is not in scope."); - Variable* net = scope->GetVariable(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } - net->GetMutable()->Run(step_scopes[step_id], dev_ctx); + net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); } LinkBootMemoryGradients(step_scopes[0]); rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); } void RecurrentGradientAlgorithm::LinkBootMemoryGradients( - std::shared_ptr step_scope) const { + Scope* step_scope) const { for (auto& attr : arg_->memories) { - Tensor* mem_grad = - step_scope->CreateVariable(attr.var)->GetMutable(); + Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable(); PADDLE_ENFORCE(mem_grad != nullptr, "boot_tensor should be retrieved before"); - PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var), + PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); Tensor* boot_mem_grad = - step_scope->CreateVariable(attr.boot_var)->GetMutable(); + step_scope->NewVar(attr.boot_var)->GetMutable(); boot_mem_grad->ShareDataWith(*mem_grad); } } -void RecurrentGradientAlgorithm::InferShape( - const std::shared_ptr& scope) const { - seq_len_ = scope->GetVariable((arg_->inlinks[0]).external) +void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { + seq_len_ = scope.FindVar((arg_->inlinks[0]).external) 
->GetMutable() ->dims()[0]; auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); - PADDLE_ENFORCE(scope->HasVariable(arg_->step_net), + PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr, "step net is not in scope."); - Variable* net = scope->GetVariable(arg_->step_net); + Variable* net = scope.FindVar(arg_->step_net); PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } - net->GetMutable()->InferShape(step_scopes[step_id]); + net->GetMutable()->InferShape(*step_scopes[step_id]); } auto outlinks = arg_->outlinks; for (size_t i = 0; i < outlinks.size(); i++) { DDim step_dims = step_scopes[0] - ->GetVariable(outlinks[i].internal) + ->FindVar(outlinks[i].internal) ->GetMutable() ->dims(); std::vector dims_vec = vectorize(step_dims); // now only support fixed length dims_vec.insert(dims_vec.begin(), seq_len_); Tensor* output = - step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable(); + step_scopes[0]->FindVar(outlinks[i].external)->GetMutable(); output->Resize(make_ddim(dims_vec)); } LinkBootMemoryGradients(step_scopes[0]); diff --git a/paddle/operators/recurrent_network_op.h b/paddle/operators/recurrent_network_op.h index 8946c8ce38117c391edcf56558c640ebd0d7f75c..d57a1a2e51cbed22549ab6ebce79223e2d4e3bcf 100644 --- a/paddle/operators/recurrent_network_op.h +++ b/paddle/operators/recurrent_network_op.h @@ -70,18 +70,18 @@ struct ArgumentName { /** * Prepare inputs for each step net. */ -void SegmentInputs(std::vector>& step_scopes, +void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len); /** * Process outputs of step nets and merge to variables. */ -void ConcatOutputs(std::vector>& step_scopes, +void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, const size_t seq_len); -void LinkMemories(std::vector>& step_scopes, +void LinkMemories(const std::vector& step_scopes, const std::vector& memories, size_t step_id, int offset); @@ -100,15 +100,14 @@ void InitArgument(const ArgumentName& name, Argument* arg); class RecurrentAlgorithm { public: - void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const; + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const; void Init(std::unique_ptr arg) { arg_ = std::move(arg); } /** * InferShape must be called before Run. */ - void InferShape(const std::shared_ptr& scope) const; + void InferShape(const Scope& scope) const; protected: /* @@ -117,15 +116,13 @@ protected: * NOTE the scopes are reused in both the forward and backward, so just * create once and expand its size if more steps need. 
*/ - void CreateScopes(std::shared_ptr scope) const; + void CreateScopes(const Scope& scope) const; - inline const std::vector>& GetStepScopes( - std::shared_ptr scope) const { - return *(scope->GetVariable(arg_->step_scopes)) - ->GetMutable>>(); + const std::vector& GetStepScopes(const Scope& scope) const { + return *scope.FindVar(arg_->step_scopes)->GetMutable>(); } - void InitMemories(std::shared_ptr step_scopes) const; + void InitMemories(Scope* step_scopes) const; private: std::unique_ptr arg_; @@ -146,21 +143,18 @@ class RecurrentGradientAlgorithm { public: void Init(std::unique_ptr arg) { arg_ = std::move(arg); } - void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const; + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const; - void LinkBootMemoryGradients(std::shared_ptr step_scopes) const; + void LinkBootMemoryGradients(Scope* step_scopes) const; /** * InferShape must be called before Run. */ - void InferShape(const std::shared_ptr& scope) const; + void InferShape(const Scope& scope) const; protected: - inline const std::vector>& GetStepScopes( - std::shared_ptr scope) const { - return *(scope->GetVariable(arg_->step_scopes)) - ->GetMutable>>(); + inline const std::vector& GetStepScopes(const Scope& scope) const { + return *scope.FindVar(arg_->step_scopes)->GetMutable>(); } private: @@ -175,11 +169,11 @@ public: /** * InferShape must be called before Run. */ - virtual void InferShape(const std::shared_ptr& scope) const override { + virtual void InferShape(const Scope& scope) const override { alg_.InferShape(scope); } - virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); } @@ -197,11 +191,11 @@ public: /** * InferShape must be called before Run. 
*/ - virtual void InferShape(const std::shared_ptr& scope) const override { + virtual void InferShape(const Scope& scope) const override { alg_.InferShape(scope); } - virtual void Run(const std::shared_ptr& scope, + virtual void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); } diff --git a/paddle/operators/recurrent_network_op_test.cc b/paddle/operators/recurrent_network_op_test.cc index 6784ac6001ad1b464d65814cff1ad6247826ad66..b0e61fbee611744adb85b498b1c3540f059afc8c 100644 --- a/paddle/operators/recurrent_network_op_test.cc +++ b/paddle/operators/recurrent_network_op_test.cc @@ -34,41 +34,40 @@ protected: virtual void TearDown() override {} void CreateGlobalVariables() { - scope_ = std::make_shared(); // create input, and init content LOG(INFO) << "create global variable x"; for (auto inlink : std::vector{"x", "x0", "x1", "h"}) { - Variable* x = scope_->CreateVariable(inlink); + Variable* x = scope_.NewVar(inlink); DDim dims = make_ddim(std::vector{ 10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); } // create output alias just for test for (auto inlink : std::vector{"h@alias"}) { - Variable* x = scope_->CreateVariable(inlink); + Variable* x = scope_.NewVar(inlink); DDim dims = make_ddim(std::vector{20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); } LOG(INFO) << "create global variable w"; - Variable* w = scope_->CreateVariable("rnn/w"); + Variable* w = scope_.NewVar("rnn/w"); w->GetMutable()->mutable_data( make_ddim(std::vector{30, 30}), platform::CPUPlace()); for (auto boot : std::vector{"x_boot", "h_boot"}) { LOG(INFO) << "create global variable " << boot; - Variable* h_boot = scope_->CreateVariable(boot); + Variable* h_boot = scope_.NewVar(boot); h_boot->GetMutable()->mutable_data( make_ddim(std::vector{20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); } LOG(INFO) << "create variable step_scopes"; - scope_->CreateVariable("step_scopes"); + scope_.NewVar("step_scopes"); LOG(INFO) << "create variable h"; - scope_->CreateVariable("h"); + scope_.NewVar("h"); } void CreateRNNOp() { @@ -150,7 +149,7 @@ protected: void CreateStepNet() { LOG(INFO) << "create variable step_net"; - Variable* var = scope_->CreateVariable("step_net"); + Variable* var = scope_.NewVar("step_net"); auto net = var->GetMutable(); // rnn/s is net's input or output? 
net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"}; @@ -164,7 +163,7 @@ protected: } // father scope - std::shared_ptr scope_; + Scope scope_; std::shared_ptr rnn_op_; }; @@ -191,68 +190,64 @@ protected: virtual void TearDown() override {} void CreateGlobalVariables() { - scope_ = std::make_shared(); // inputs: x LOG(INFO) << "create global variable x"; - Variable* x = scope_->CreateVariable("x"); + Variable* x = scope_.NewVar("x"); DDim dims = make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); x->GetMutable()->mutable_data(dims, platform::CPUPlace()); // inputs: h_boot LOG(INFO) << "create global variable h_boot"; - Variable* h_boot = scope_->CreateVariable("h_boot"); + Variable* h_boot = scope_.NewVar("h_boot"); h_boot->GetMutable()->mutable_data( make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); // inputs: w LOG(INFO) << "create global variable w"; - Variable* w = scope_->CreateVariable("rnn/w"); + Variable* w = scope_.NewVar("rnn/w"); w->GetMutable()->mutable_data(make_ddim({30, 30}), platform::CPUPlace()); // inputs: h_grad LOG(INFO) << "create variable h_grad"; - Variable* dh = scope_->CreateVariable("h_grad"); + Variable* dh = scope_.NewVar("h_grad"); dh->GetMutable()->mutable_data(make_ddim({10, 20, 30}), platform::CPUPlace()); // inputs: step_scopes LOG(INFO) << "create variable step_scopes"; - scope_->CreateVariable("step_scopes"); + scope_.NewVar("step_scopes"); // inputs: step_net LOG(INFO) << "create variable step_net"; - scope_->CreateVariable("step_net"); + scope_.NewVar("step_net"); // outputs: w_grad LOG(INFO) << "create global variable w_grad"; - scope_->CreateVariable("rnn/w_grad"); + scope_.NewVar("rnn/w_grad"); // outputs: x_grad LOG(INFO) << "create global variable x_grad"; - scope_->CreateVariable("x_grad"); + scope_.NewVar("x_grad"); // outputs: h_boot_grad LOG(INFO) << "create global variable h_boot_grad"; - scope_->CreateVariable("h_boot_grad"); + scope_.NewVar("h_boot_grad"); } void CreateStepScopes() { - std::vector>* step_scopes = - scope_->GetVariable("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); for (int i = 0; i < 10; ++i) { - auto scope = std::make_shared(scope_); - auto pre_t = scope->CreateVariable("rnn/pre_h")->GetMutable(); - pre_t->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); - auto tensor = scope->CreateVariable("rnn/h")->GetMutable(); - tensor->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + auto& scope = scope_.NewScope(); + auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable(); + pre_t->mutable_data({20, 30}, platform::CPUPlace()); + auto tensor = scope.NewVar("rnn/h")->GetMutable(); + tensor->mutable_data({20, 30}, platform::CPUPlace()); // for unit test of ConcatOutputs - auto xg = scope->CreateVariable("rnn/x_grad")->GetMutable(); - xg->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + auto xg = scope.NewVar("rnn/x_grad")->GetMutable(); + xg->mutable_data({20, 30}, platform::CPUPlace()); - step_scopes->push_back(scope); + step_scopes->emplace_back(&scope); } // last time step - auto g = (*step_scopes)[9] - ->CreateVariable("rnn/h_pre_grad") - ->GetMutable(); - g->mutable_data(make_ddim({20, 30}), platform::CPUPlace()); + auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable(); + g->mutable_data({20, 30}, platform::CPUPlace()); } void CreateRNNGradientAlgorithm() { @@ -280,7 +275,7 @@ protected: void CreateStepNet() { LOG(INFO) << "create variable step_net"; - Variable* var = scope_->CreateVariable("step_net"); + 
Variable* var = scope_.NewVar("step_net"); auto net = var->GetMutable(); net->AddOp(OpRegistry::CreateOp("mul", {"rnn/h_pre", "rnn/w", "rnn/s_grad"}, @@ -300,9 +295,8 @@ protected: rnn::Link inlink; inlink.external = "x"; inlink.internal = "rnn/x"; - std::vector>* step_scopes = - scope_->GetVariable("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); rnn::SegmentInputs(*step_scopes, std::vector{inlink}, 10); } @@ -314,15 +308,14 @@ protected: mem_attr.boot_var = "boot_h"; std::vector memories; memories.push_back(mem_attr); - std::vector>* step_scopes = - scope_->GetVariable("step_scopes") - ->GetMutable>>(); + auto step_scopes = + scope_.FindVar("step_scopes")->GetMutable>(); for (int i = 1; i < 10; ++i) { rnn::LinkMemories(*step_scopes, memories, i, -1); } } - std::shared_ptr scope_; + Scope scope_; RecurrentGradientAlgorithm rnn_grad_algo_; }; @@ -341,14 +334,14 @@ TEST(RecurrentOp, LinkMemories) { // create and init step scopes int len = 10; - std::vector> step_scopes; + std::vector step_scopes; for (int i = 0; i < len; ++i) { - auto scope = std::make_shared(); - scope->CreateVariable("pre_h"); - auto tensor = scope->CreateVariable("h")->GetMutable(); - float* data = tensor->mutable_data(make_ddim({15, 20}), CPUPlace()); - for (int i = 0; i < 15 * 20; ++i) { - data[i] = rand() * (1. / (double)RAND_MAX); + auto scope = new Scope(); + scope->NewVar("pre_h"); + auto tensor = scope->NewVar("h")->GetMutable(); + float* data = tensor->mutable_data({15, 20}, CPUPlace()); + for (int j = 0; j < 15 * 20; ++j) { + data[j] = rand() * (1. / (double)RAND_MAX); } step_scopes.push_back(scope); } @@ -367,9 +360,9 @@ TEST(RecurrentOp, LinkMemories) { // check for (int i = 0; i < len - 1; ++i) { const float* a = - step_scopes[i]->GetVariable("h")->GetMutable()->data(); + step_scopes[i]->FindVar("h")->GetMutable()->data(); const float* b = step_scopes[i + 1] - ->GetVariable("pre_h") + ->FindVar("pre_h") ->GetMutable() ->data(); for (size_t i = 0; i < 15 * 20; ++i) { @@ -382,19 +375,25 @@ TEST(RecurrentOp, LinkMemories) { } // check for (int i = len - 2; i >= 0; --i) { - const float* a = step_scopes[i] - ->GetVariable("pre_h") - ->GetMutable() - ->data(); - const float* b = step_scopes[i + 1] - ->GetVariable("h") - ->GetMutable() - ->data(); + const float* a = + step_scopes[i]->FindVar("pre_h")->GetMutable()->data(); + const float* b = + step_scopes[i + 1]->FindVar("h")->GetMutable()->data(); for (size_t i = 0; i < 15 * 20; ++i) { ASSERT_FLOAT_EQ(a[i], b[i]); } } + + for (auto s : step_scopes) { + delete s; + } } USE_OP(add_two); USE_OP(mul); + +// int main() { +// //! TODO(yuyang18): Temporary disable this unit-test because implementation +// //! error. 
+// return 0; +//} \ No newline at end of file diff --git a/paddle/operators/type_alias.h b/paddle/operators/type_alias.h index 9d1f5fba2ad3ada4742ada30b41d68d15a69ca45..93b62cddc819e0d1fd48323e474a294ff0d327e1 100644 --- a/paddle/operators/type_alias.h +++ b/paddle/operators/type_alias.h @@ -25,6 +25,10 @@ using OpKernel = framework::OpKernel; using InferShapeContext = framework::InferShapeContext; using ExecutionContext = framework::ExecutionContext; using Variable = framework::Variable; +template +using EigenScalar = framework::EigenScalar; template diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index fd4adbd9deca12ad6c3a59cfd5d30fb0cb6fcf98..ff69a0f8ab944495ef4232fbde253b734abdf9fd 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 08a8bd0d8b6177ce757b0d451164b7912bf48179..ee5f675e2512c8d167b590b6a551efd273267fb0 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -102,15 +102,18 @@ All parameter, weight, gradient are variables in Paddle. }, py::return_value_policy::reference); - py::class_>(m, "Scope") - .def(py::init&>()) - .def("get_var", - &pd::Scope::GetVariable, + py::class_(m, "Scope", "") + .def("new_var", + [](pd::Scope& self, const std::string& name) -> pd::Variable* { + return self.NewVar(name); + }, py::return_value_policy::reference) - .def("create_var", - &pd::Scope::CreateVariable, + .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference) + .def(py::init<>()) + .def("new_scope", + [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); }, py::return_value_policy::reference) - .def("get_var_name", &pd::Scope::GetVariableName); + .def("drop_kids", &pd::Scope::DropKids); //! @note: Be careful! PyBind will return std::string as an unicode, not //! Python str. If you want a str object, you should cast them in Python. diff --git a/python/paddle/v2/framework/default_scope_funcs.py b/python/paddle/v2/framework/default_scope_funcs.py index 4e772326c94b7ee44906c71f13e9420e078a1917..1b5580c8b30f69016f187b1d8710a57b5f7cfa9f 100644 --- a/python/paddle/v2/framework/default_scope_funcs.py +++ b/python/paddle/v2/framework/default_scope_funcs.py @@ -5,7 +5,7 @@ Default scope function. thread-local stack of Scope. Top of that stack is current scope, the bottom of that stack is all scopes' parent. -Invoking `create_var/get_var` can `create/get` variable in current scope. +Invoking `new_var/find_var` can `new/find` variable in current scope. Invoking `enter_local_scope/leave_local_scope` can create or destroy local scope. 
@@ -19,8 +19,8 @@ import threading __tl_scope__ = threading.local() __all__ = [ - 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'create_var', - 'get_var', 'scoped_function' + 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var', + 'find_var', 'scoped_function' ] @@ -33,7 +33,7 @@ def get_cur_scope(): if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: - __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope(None)) + __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope()) return __tl_scope__.cur_scope[-1] @@ -42,7 +42,7 @@ def enter_local_scope(): Enter a new local scope """ cur_scope = get_cur_scope() - new_scope = paddle.v2.framework.core.Scope(cur_scope) + new_scope = cur_scope.new_scope() __tl_scope__.cur_scope.append(new_scope) @@ -51,20 +51,21 @@ def leave_local_scope(): Leave local scope """ __tl_scope__.cur_scope.pop() + get_cur_scope().drop_kids() -def create_var(name): +def new_var(name): """ create variable in current scope. """ - return get_cur_scope().create_var(name) + return get_cur_scope().new_var(name) -def get_var(name): +def find_var(name): """ get variable in current scope. """ - return get_cur_scope().get_var(name) + return get_cur_scope().find_var(name) def scoped_function(func): diff --git a/python/paddle/v2/framework/network.py b/python/paddle/v2/framework/network.py index c85e87413ef45f40755709e134a277b8d8d1e233..cfeb0e3dec0fd2c6ad4d2d2501f97932495fdd41 100644 --- a/python/paddle/v2/framework/network.py +++ b/python/paddle/v2/framework/network.py @@ -1,6 +1,6 @@ import paddle.v2.framework.core as core from paddle.v2.framework.create_op_creation_methods import op_creations -from default_scope_funcs import create_var, get_var, get_cur_scope +from default_scope_funcs import new_var, find_var, get_cur_scope __all__ = ['Network'] # Only expose Network @@ -29,12 +29,15 @@ class NetworkFunctor(object): if ipt in kwargs: var = kwargs[ipt] if isinstance(var, basestring): - var = create_var(var) + tmp = new_var(var) + self.net.var_names[tmp] = var + var = tmp + if not isinstance(var, core.Variable): raise TypeError( "Input of op creation must be string or variable") - kwargs[ipt] = get_cur_scope().get_var_name(var) + kwargs[ipt] = self.net.var_names[var] notemp_outputs = self.func.all_not_temp_output_args @@ -49,17 +52,20 @@ class NetworkFunctor(object): if opt in kwargs: var = kwargs[opt] if isinstance(var, basestring): - var = create_var(var) + tmp = new_var(var) + self.net.var_names[tmp] = var + var = tmp + if not isinstance(var, core.Variable): raise TypeError( "Output of op creation must be string or variable") - kwargs[opt] = get_cur_scope().get_var_name(var) + kwargs[opt] = self.net.var_names[var] op = self.func(**kwargs) self.net.net.add_op(op) - lst = [get_var(kwargs[opt]) for opt in notemp_outputs] + lst = [find_var(kwargs[opt]) for opt in notemp_outputs] if len(lst) == 1: return lst[0] elif len(lst) == 0: @@ -89,6 +95,7 @@ class Network(object): self.net = core.Net.create() funcs = (func_name for func_name in dir(op_creations) if not func_name.startswith("__")) + self.var_names = dict() # TODO(yuyang18): This code can work, but do not generate a good # docstring, try to give a better way generate function in runtime diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index 7b62313f8aca5e9f515d1a9e6df3bb6f51b974fb..99085c367221150c8386a24e8d90d58fd63894c4 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ 
b/python/paddle/v2/framework/tests/op_test_util.py @@ -24,13 +24,13 @@ class OpTestMeta(type): func = getattr(creation.op_creations, self.type, None) self.assertIsNotNone(func) - scope = core.Scope(None) + scope = core.Scope() kwargs = dict() for in_name in func.all_input_args: if hasattr(self, in_name): kwargs[in_name] = in_name - var = scope.create_var(in_name).get_tensor() + var = scope.new_var(in_name).get_tensor() arr = getattr(self, in_name) var.set_dims(arr.shape) var.set(arr) @@ -40,7 +40,7 @@ class OpTestMeta(type): for out_name in func.all_output_args: if hasattr(self, out_name): kwargs[out_name] = out_name - scope.create_var(out_name).get_tensor() + scope.new_var(out_name).get_tensor() for attr_name in func.all_attr_args: if hasattr(self, attr_name): @@ -54,7 +54,7 @@ class OpTestMeta(type): op.run(scope, ctx) for out_name in func.all_output_args: - actual = numpy.array(scope.get_var(out_name).get_tensor()) + actual = numpy.array(scope.find_var(out_name).get_tensor()) expect = getattr(self, out_name) # TODO(qijun) The default decimal is 7, but numpy.dot and eigen.mul # has some diff, and could not pass unittest. So I set decimal 3 here. diff --git a/python/paddle/v2/framework/tests/test_default_scope_funcs.py b/python/paddle/v2/framework/tests/test_default_scope_funcs.py index 81033deb1546c81e2566ec5474f45dc56781644a..495863c4562b5a2d6755fb02e21a6b0c845fd7b6 100644 --- a/python/paddle/v2/framework/tests/test_default_scope_funcs.py +++ b/python/paddle/v2/framework/tests/test_default_scope_funcs.py @@ -7,19 +7,19 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertIsNotNone(get_cur_scope()) def test_none_variable(self): - self.assertIsNone(get_var("test")) + self.assertIsNone(find_var("test")) def test_create_var_get_var(self): - var_a = create_var("var_a") + var_a = new_var("var_a") self.assertIsNotNone(var_a) - self.assertIsNotNone(get_cur_scope().get_var('var_a')) + self.assertIsNotNone(get_cur_scope().find_var('var_a')) enter_local_scope() - self.assertIsNotNone(get_cur_scope().get_var('var_a')) + self.assertIsNotNone(get_cur_scope().find_var('var_a')) leave_local_scope() def test_var_get_int(self): def __new_scope__(): - i = create_var("var_i") + i = new_var("var_i") self.assertFalse(i.is_int()) i.set_int(10) self.assertTrue(i.is_int()) diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 59e7e61249e2a7d49a17e5d87209f03b8f35f730..43931aac406cd93beede008066aa1c0c00eba6ea 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -6,13 +6,13 @@ import paddle.v2.framework.create_op_creation_methods as creation class TestFc(unittest.TestCase): def test_fc(self): - scope = core.Scope(None) - x = scope.create_var("X") + scope = core.Scope() + x = scope.new_var("X") x_tensor = x.get_tensor() x_tensor.set_dims([1000, 784]) x_tensor.alloc_float() - w = scope.create_var("W") + w = scope.new_var("W") w_tensor = w.get_tensor() w_tensor.set_dims([784, 100]) w_tensor.alloc_float() @@ -25,10 +25,10 @@ class TestFc(unittest.TestCase): op = creation.op_creations.fc(X="X", Y="Y", W="W") for out in op.outputs(): - if scope.get_var(out) is None: - scope.create_var(out).get_tensor() + if scope.find_var(out) is None: + scope.new_var(out).get_tensor() - tensor = scope.get_var("Y").get_tensor() + tensor = scope.find_var("Y").get_tensor() op.infer_shape(scope) self.assertEqual([1000, 100], tensor.shape()) diff --git a/python/paddle/v2/framework/tests/test_scope.py 
b/python/paddle/v2/framework/tests/test_scope.py index f0ee45cfc75e486c693a00d92a97ac0970195581..1ce9454067f91f39f01d9eb4c912857464a3c1cb 100644 --- a/python/paddle/v2/framework/tests/test_scope.py +++ b/python/paddle/v2/framework/tests/test_scope.py @@ -5,29 +5,29 @@ import unittest class TestScope(unittest.TestCase): def test_create_destroy(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) + scope = paddle_c.Scope() self.assertIsNotNone(scope) - scope_with_parent = paddle_c.Scope(scope) + scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) - self.assertIsNone(scope.get_var("test")) + scope = paddle_c.Scope() + self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) - var_a = scope.create_var("var_a") + scope = paddle_c.Scope() + var_a = scope.new_var("var_a") self.assertIsNotNone(var_a) - self.assertIsNotNone(scope.get_var('var_a')) - scope2 = paddle_c.Scope(scope) - self.assertIsNotNone(scope2.get_var('var_a')) + self.assertIsNotNone(scope.find_var('var_a')) + scope2 = scope.new_scope() + self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): paddle_c = paddle.v2.framework.core - scope = paddle_c.Scope(None) - var = scope.create_var("test_int") + scope = paddle_c.Scope() + var = scope.new_var("test_int") var.set_int(10) self.assertTrue(var.is_int()) self.assertEqual(10, var.get_int()) diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index b72aff3b9cd16595c7e81856642196b2bb61a790..6d59863cea29832f648139e07a134050e22bfa21 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -5,8 +5,8 @@ import numpy class TestScope(unittest.TestCase): def test_int_tensor(self): - scope = core.Scope(None) - var = scope.create_var("test_tensor") + scope = core.Scope() + var = scope.new_var("test_tensor") tensor = var.get_tensor() tensor.set_dims([1000, 784]) @@ -23,8 +23,8 @@ class TestScope(unittest.TestCase): self.assertEqual(2.0, tensor_array_2[19, 11]) def test_float_tensor(self): - scope = core.Scope(None) - var = scope.create_var("test_tensor") + scope = core.Scope() + var = scope.new_var("test_tensor") tensor = var.get_tensor() tensor.set_dims([1000, 784])
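To summarize the ownership change that runs through the `recurrent_network_op` files earlier in this patch (step scopes switch from `std::vector<std::shared_ptr<Scope>>` to `std::vector<Scope*>` whose elements are created by `Scope::NewScope`), here is a minimal sketch under the assumption of the `scope.h` interface introduced above; `StepScopeSketch` is a hypothetical function for illustration only.

```cpp
#include <vector>

#include "paddle/framework/scope.h"

using paddle::framework::Scope;

// Hypothetical illustration of the step-scope ownership pattern: raw Scope*
// pointers are stored in the vector, but the parent scope (not the vector)
// owns the children, so no manual delete of the elements is needed.
void StepScopeSketch(Scope& parent, size_t seq_len) {
  std::vector<Scope*> step_scopes;
  for (size_t i = 0; i < seq_len; ++i) {
    // NewScope() is a const member: it only touches the mutable kid list,
    // never the parent's variables.
    step_scopes.push_back(&parent.NewScope());
  }
  // ... run the step net against each element of step_scopes ...
  parent.DropKids();  // releases every step scope at once
}
```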