Commit aced61dd, authored by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into refine-context

@@ -37,8 +37,8 @@ Scope is an association of a name to a variable. All variables belong to `Scope`.
```cpp
class Scope {
 public:
  Variable* NewVar(const std::string& name);
  const Variable* FindVar(const std::string& name) const;

 private:
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
@@ -58,12 +58,12 @@ class Scope {
 public:
  Scope(const std::shared_ptr<Scope>& scope): parent_(scope) {}

  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) {
      return it->second.get();
    } else if (parent_ != nullptr) {
      return parent_->FindVar(name);
    } else {
      return nullptr;
    }
@@ -95,10 +95,10 @@ class Scope {
  static std::shared_ptr<Scope> Create(const std::shared_ptr<Scope>& parent = nullptr);

  // return nullptr if not found.
  Variable* FindVar(const std::string& name) const;

  // return if already contains same name variable.
  Variable* NewVar(const std::string& name);

 private:
  std::shared_ptr<Scope> parent_;
@@ -107,11 +107,11 @@ class Scope {
```
## Only scope can create a variable

To ensure that only a scope can create a variable, we mark `Variable`'s constructor as a private member function and make `Scope` a friend class of `Variable`. Then only `NewVar` can construct a `Variable`, as sketched below.
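A minimal, self-contained sketch of that friendship pattern (the map member and the body of `NewVar` here are illustrative, not the actual implementation):

```cpp
#include <memory>
#include <string>
#include <unordered_map>

class Variable {
 private:
  Variable() = default;  // not constructible outside of Scope
  friend class Scope;    // only Scope may call the private constructor
};

class Scope {
 public:
  // Legal only here: Scope is a friend of Variable, so it may call
  // Variable's private constructor.
  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (slot == nullptr) slot.reset(new Variable());
    return slot.get();
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
};
```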
## When a scope is destroyed, all variables inside it should be destroyed together

The scope holds unique pointers to all of its variables. A user can `FindVar` a variable from a scope, but should not keep the returned pointer as a member variable, because when the scope is destroyed, every variable inside it is destroyed with it.
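A short sketch of that lifetime rule, assuming the `Scope` API described in this change (the function and variable names are illustrative):

```cpp
#include "paddle/framework/scope.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

void LifetimeDemo() {
  Variable* saved = nullptr;
  {
    Scope scope;
    saved = scope.NewVar("w");  // "w" is owned by `scope`
    // `saved` may be used freely while `scope` is alive.
  }  // `scope` is destroyed here, and so is every variable it owned
  // `saved` is now dangling; dereferencing it is undefined behavior.
}
```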
## Sharing a parent scope

@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar...

## Orthogonal interface

`FindVar` returns `nullptr` when `name` is not found, so it can double as a `Contains` method. `NewVar` returns an `Error` when there is a local name conflict. By combining `FindVar` and `NewVar`, a create-or-get operation can be implemented easily, as sketched below.
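For illustration only, a hypothetical `FindOrCreateVar` helper (not part of this change) that composes the two calls:

```cpp
#include <string>
#include "paddle/framework/scope.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

// Hypothetical helper: look the name up first, creating it locally
// only when the lookup comes back empty.
Variable* FindOrCreateVar(Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);  // nullptr doubles as "not found"
  return var != nullptr ? var : scope.NewVar(name);
}
```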
@@ -8,7 +8,9 @@ cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_test(variable_test SRCS variable_test.cc)
cc_library(scope SRCS scope.cc)
cc_test(scope_test SRCS scope_test.cc DEPS scope)
proto_library(attr_type SRCS attr_type.proto)
proto_library(op_proto SRCS op_proto.proto DEPS attr_type)
@@ -16,7 +18,7 @@ proto_library(op_desc SRCS op_desc.proto DEPS attr_type)
cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf)
cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope)
cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS op_proto operator)
@@ -80,5 +80,21 @@ struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
  }
};

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
struct EigenScalar {
  // Scalar tensor (implemented as a rank-0 tensor) of scalar type T.
  using Type = Eigen::TensorMap<
      Eigen::TensorFixedSize<T, Eigen::Sizes<>, MajorType, IndexType>>;
  using ConstType = Eigen::TensorMap<
      Eigen::TensorFixedSize<const T, Eigen::Sizes<>, MajorType, IndexType>>;

  static Type From(Tensor& tensor) { return Type(tensor.data<T>()); }

  static ConstType From(const Tensor& tensor) {
    return ConstType(tensor.data<T>());
  }
};

}  // namespace framework
}  // namespace paddle
@@ -46,6 +46,17 @@ TEST(Eigen, Tensor) {
  }
}

TEST(Eigen, ScalarFrom) {
  Tensor t;
  int* p = t.mutable_data<int>(make_ddim({1}), platform::CPUPlace());
  *p = static_cast<int>(100);

  EigenScalar<int>::Type es = EigenScalar<int>::From(t);

  ASSERT_EQ(0, es.dimension(0));
  ASSERT_EQ(100, es(0));
}

TEST(Eigen, VectorFrom) {
  Tensor t;
  float* p = t.mutable_data<float>(make_ddim({6}), platform::CPUPlace());
@@ -43,7 +43,7 @@ class NetOp : public OperatorBase {
   * Infer all the operators' input and output variables' shapes, will be called
   * before every mini-batch
   */
  void InferShape(const Scope& scope) const override {
    for (auto& op : ops_) {
      op->InferShape(scope);
    }
@@ -56,7 +56,7 @@ class NetOp : public OperatorBase {
   * scope will be used instead. If no OpContext is provided, default context
   * will be used.
   */
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    for (auto& op : ops_) {
      op->Run(scope, dev_ctx);
@@ -16,10 +16,10 @@ static int run_cnt = 0;
class TestOp : public OperatorBase {
 public:
  void InferShape(const framework::Scope& scope) const override {
    ++infer_shape_cnt;
  }
  void Run(const framework::Scope& scope,
           const paddle::platform::DeviceContext& dev_ctx) const override {
    ++run_cnt;
  }
@@ -61,7 +61,7 @@ TEST(OpKernel, all) {
  ASSERT_EQ(1UL, tmp_idx.size());
  ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);

  Scope scope;
  platform::CPUDeviceContext dev_ctx;
  net->InferShape(scope);
@@ -7,9 +7,9 @@ namespace paddle {
namespace framework {
class CosineOp : public OperatorBase {
 public:
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {}
  void InferShape(const Scope& scope) const override {}
};

class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
@@ -27,8 +27,8 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
class MyTestOp : public OperatorBase {
 public:
  void InferShape(const Scope& scope) const override {}
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {}
};
@@ -69,7 +69,7 @@ TEST(OpRegistry, CreateOp) {
  std::shared_ptr<paddle::framework::OperatorBase> op =
      paddle::framework::OpRegistry::CreateOp(op_desc);

  paddle::framework::Scope scope;
  paddle::platform::CPUDeviceContext dev_ctx;
  op->Run(scope, dev_ctx);

  float scale_get = op->GetAttr<float>("scale");
@@ -111,7 +111,7 @@ TEST(OpRegistry, DefaultValue) {
  std::shared_ptr<paddle::framework::OperatorBase> op =
      paddle::framework::OpRegistry::CreateOp(op_desc);

  paddle::framework::Scope scope;
  paddle::platform::CPUDeviceContext dev_ctx;
  op->Run(scope, dev_ctx);
  ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
@@ -173,7 +173,7 @@ TEST(OpRegistry, CustomChecker) {
  SetInputFormat(&op_desc);
  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  paddle::platform::CPUDeviceContext dev_ctx;
  paddle::framework::Scope scope;
  op->Run(scope, dev_ctx);
  int test_attr = op->GetAttr<int>("test_attr");
  ASSERT_EQ(test_attr, 4);
@@ -71,10 +71,10 @@ class OperatorBase {
  /// InferShape infer the size of Variables used by this Operator with
  /// information inside scope
  virtual void InferShape(const Scope& scope) const = 0;

  /// Net will call this function to Run an op.
  virtual void Run(const Scope& scope,
                   const platform::DeviceContext& dev_ctx) const = 0;

  virtual bool IsNetOp() const { return false; }
@@ -101,7 +101,7 @@ class OperatorBase {
class OperatorContext {
 public:
  OperatorContext(const OperatorBase* op, const Scope& scope)
      : op_(*op), scope_(scope) {}

  size_t InputSize() const { return op_.inputs_.size(); }
@@ -109,19 +109,19 @@ class OperatorContext {
  size_t OutputSize() const { return op_.outputs_.size(); }

  const Variable* InputVar(const size_t index) const {
    return scope_.FindVar(op_.inputs_.at(index));
  }

  Variable* OutputVar(const size_t index) const {
    return scope_.FindVar(op_.outputs_.at(index));
  }

  const Variable* InputVar(const std::string& name) const {
    return scope_.FindVar(op_.Input(name));
  }

  Variable* OutputVar(const std::string& name) const {
    return scope_.FindVar(op_.Output(name));
  }

  const std::vector<const Variable*> MultiInputVar(
@@ -131,7 +131,7 @@ class OperatorContext {
    res.reserve(names.size());
    std::transform(
        names.begin(), names.end(), std::back_inserter(res),
        [this](const std::string& name) { return scope_.FindVar(name); });
    return res;
  }
@@ -141,7 +141,7 @@ class OperatorContext {
    res.reserve(names.size());
    std::transform(
        names.begin(), names.end(), std::back_inserter(res),
        [this](const std::string& name) { return scope_.FindVar(name); });
    return res;
  }
@@ -180,7 +180,7 @@ class OperatorContext {
    res.reserve(names.size());
    std::transform(names.begin(), names.end(), std::back_inserter(res),
                   [&](const std::string& sub_name) {
                     auto var = scope_.FindVar(sub_name);
                     PADDLE_ENFORCE(var != nullptr,
                                    "MultiInput(%s:%s) should not be nullptr",
                                    name, sub_name);
@@ -196,7 +196,7 @@ class OperatorContext {
    res.reserve(names.size());
    std::transform(names.begin(), names.end(), std::back_inserter(res),
                   [&](const std::string& sub_name) {
                     auto var = scope_.FindVar(sub_name);
                     PADDLE_ENFORCE(var != nullptr,
                                    "MultiOutput(%s:%s) should not be nullptr",
                                    name, sub_name);
@@ -206,12 +206,12 @@ class OperatorContext {
  }

  const OperatorBase& op_;
  const Scope& scope_;
};

class InferShapeContext : public OperatorContext {
 public:
  InferShapeContext(const OperatorBase* op, const Scope& scope)
      : OperatorContext(op, scope) {}
};
@@ -232,7 +232,7 @@ struct EigenDeviceConverter<platform::GPUPlace> {
class ExecutionContext : public OperatorContext {
 public:
  ExecutionContext(const OperatorBase* op, const Scope& scope,
                   const platform::DeviceContext& device_context)
      : OperatorContext(op, scope), device_context_(device_context) {}
@@ -285,11 +285,11 @@ class OperatorWithKernel : public OperatorBase {
  using OpKernelMap =
      std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>;

  void InferShape(const Scope& scope) const {
    InferShape(InferShapeContext(this, scope));
  }

  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const final {
    auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
    opKernel->Compute(ExecutionContext(this, scope, dev_ctx));
@@ -24,16 +24,15 @@ static int op_run_num = 0;
class OpWithoutKernelTest : public OperatorBase {
 public:
  void Init() override { x = 1; }
  void InferShape(const Scope& scope) const override {}
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    op_run_num++;
    ASSERT_EQ((int)inputs_.size(), 1);
    ASSERT_EQ((int)outputs_.size(), 1);
    ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
    ASSERT_EQ(x, 1);
    ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
  }

 public:
@@ -69,10 +68,10 @@ TEST(OperatorBase, all) {
  attr->set_f(3.14);

  paddle::platform::CPUDeviceContext device_context;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  scope.NewVar("OUT1");
  ASSERT_EQ(paddle::framework::op_run_num, 0);
  op->InferShape(scope);
  op->Run(scope, device_context);
@@ -118,13 +117,12 @@ class CPUKernelTest : public OpKernel {
class OperatorMultiInputsTest : public OperatorBase {
 public:
  void Init() override { x = 1; }
  void InferShape(const Scope& scope) const override {}
  void Run(const Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
    ASSERT_EQ(x, 1);
    ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
    ASSERT_EQ(Input("x"), "IN1");
    ASSERT_EQ(Input("y"), "OUT1");
  }
@@ -206,7 +204,7 @@ TEST(OpKernel, all) {
  attr->set_f(3.14);

  paddle::platform::CPUDeviceContext cpu_device_context;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0);
@@ -252,13 +250,13 @@ TEST(OpKernel, multi_inputs) {
  output_format->Add(2);  // y1

  paddle::platform::CPUDeviceContext cpu_device_context;
  paddle::framework::Scope scope;
  scope.NewVar("x0")->GetMutable<Tensor>();
  scope.NewVar("x1")->GetMutable<Tensor>();
  scope.NewVar("x2")->GetMutable<Tensor>();
  scope.NewVar("k0")->GetMutable<Tensor>();
  scope.NewVar("y0")->GetMutable<Tensor>();
  scope.NewVar("y1")->GetMutable<Tensor>();

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  op->Run(scope, cpu_device_context);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/scope.h"
#include "paddle/string/printf.h"
namespace paddle {
namespace framework {
Scope::~Scope() {
  DropKids();
  for (auto& kv : vars_) delete kv.second;
}

Scope& Scope::NewScope() const {
  kids_.push_back(new Scope(this));
  return *kids_.back();
}

Variable* Scope::NewVar(const std::string& name) {
  auto iter = vars_.find(name);
  if (iter != vars_.end()) {
    return iter->second;
  }
  Variable* v = new Variable();
  vars_[name] = v;
  v->name_ = &(vars_.find(name)->first);
  return v;
}

Variable* Scope::NewVar() {
  return NewVar(string::Sprintf("%p.%d", this, vars_.size()));
}

Variable* Scope::FindVar(const std::string& name) const {
  auto it = vars_.find(name);
  if (it != vars_.end()) return it->second;
  return (parent_ == nullptr) ? nullptr : parent_->FindVar(name);
}

const Scope* Scope::FindScope(const Variable* var) const {
  for (auto& kv : vars_) {
    if (kv.second == var) {
      return this;
    }
  }
  return (parent_ == nullptr) ? nullptr : parent_->FindScope(var);
}

void Scope::DropKids() {
  for (Scope* s : kids_) delete s;
  kids_.clear();
}
} // namespace framework
} // namespace paddle
@@ -14,9 +14,9 @@ limitations under the License. */
#pragma once

#include <list>
#include <string>
#include <unordered_map>

#include "paddle/framework/variable.h"
@@ -35,73 +35,42 @@ class Scope;
 */
class Scope {
 public:
  Scope() {}
  ~Scope();

  // Disable Copy, Assign, Move.
  Scope(const Scope& other) = delete;
  Scope& operator=(const Scope& other) = delete;
  Scope(Scope&& other) = delete;

  /// Create a sub-scope. Returns a reference other than a pointer so
  /// to prevent from manual deletion.
  /// Mark it to const because that new kid scope cannot change parent scope.
  Scope& NewScope() const;

  /// Create a variable with given name if it doesn't exist.
  Variable* NewVar(const std::string& name);

  /// Create a variable with a scope-unique name.
  Variable* NewVar();

  /// Find a variable in the scope or any of its ancestors. Returns
  /// nullptr if cannot find.
  Variable* FindVar(const std::string& name) const;

  /// Find the scope or an ancestor scope that contains the given variable.
  const Scope* FindScope(const Variable* var) const;

  /// Drop all kids scopes belonged to this scope.
  void DropKids();

 private:
  // Call Scope::NewScope for a sub-scope.
  explicit Scope(Scope const* parent) : parent_(parent) {}

  std::unordered_map<std::string, Variable*> vars_;
  mutable std::list<Scope*> kids_;
  Scope const* parent_{nullptr};
};

}  // namespace framework
@@ -15,49 +15,42 @@ limitations under the License. */
#include "paddle/framework/scope.h"
#include "gtest/gtest.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

TEST(Scope, VarsShadowing) {
  Scope s;
  Scope& ss1 = s.NewScope();
  Scope& ss2 = s.NewScope();

  Variable* v0 = s.NewVar("a");
  Variable* v1 = ss1.NewVar("a");

  EXPECT_NE(v0, v1);

  EXPECT_EQ(v0, s.FindVar("a"));
  EXPECT_EQ(v1, ss1.FindVar("a"));
  EXPECT_EQ(v0, ss2.FindVar("a"));
}

TEST(Scope, FindVar) {
  Scope s;
  Scope& ss = s.NewScope();

  EXPECT_EQ(nullptr, s.FindVar("a"));
  EXPECT_EQ(nullptr, ss.FindVar("a"));

  ss.NewVar("a");

  EXPECT_EQ(nullptr, s.FindVar("a"));
  EXPECT_NE(nullptr, ss.FindVar("a"));
}

TEST(Scope, FindScope) {
  Scope s;
  Scope& ss = s.NewScope();
  Variable* v = s.NewVar("a");

  EXPECT_EQ(&s, s.FindScope(v));
  EXPECT_EQ(&s, ss.FindScope(v));
}
@@ -16,7 +16,7 @@
#include <typeindex>
#include <typeinfo>

#include "paddle/platform/enforce.h"

namespace paddle {
namespace framework {
@@ -25,7 +25,7 @@ class Variable {
 public:
  template <typename T>
  const T& Get() const {
    PADDLE_ENFORCE(IsType<T>(), "Variable must be type %s", typeid(T).name());
    return *static_cast<const T*>(holder_->Ptr());
  }
@@ -65,6 +65,17 @@ class Variable {
  std::unique_ptr<Placeholder>
      holder_;  // pointers to a PlaceholderImpl object indeed.

  // name_ is only meaningful with a Scope and accessible by it.
  //
  // NOTE: Please don't expose name_ by adding methods like
  // Variable::Name or Scope::VarName! A variable could have a human
  // readable name or an auto-generated scope-unique name. In the
  // former case, the caller knows the name and doesn't need to access
  // the name; in the latter case, the variable should be identified
  // by its address but not the unreadable name.
  friend class Scope;
  const std::string* name_;
};

}  // namespace framework
@@ -27,38 +27,37 @@ namespace operators {
namespace rnn {

void SegmentInputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& inlinks,
                   const size_t seq_len) {
  PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
  for (size_t i = 0; i < inlinks.size(); ++i) {
    Tensor* input =
        step_scopes[0]->FindVar(inlinks[i].external)->GetMutable<Tensor>();
    DDim dims = input->dims();
    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                   "all the inlinks must have same length");
    DDim step_dims = slice_ddim(dims, 1, dims.size());
    for (size_t j = 0; j < seq_len; j++) {
      Tensor* step_input =
          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
      *step_input = input->Slice<float>(j, j + 1);
      step_input->Resize(step_dims);
    }
  }
}

void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& outlinks,
                   const size_t seq_len) {
  for (size_t i = 0; i < outlinks.size(); i++) {
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();

    // TODO(qingiqng) remove following code after adding
    // InferShape in RecurrentGradientOp
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
@@ -66,9 +65,8 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
    output->mutable_data<float>(make_ddim(dims_vec), platform::CPUPlace());

    for (size_t j = 0; j < seq_len; j++) {
      Tensor* step_output =
          step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable<Tensor>();
      // TODO(luotao02) data type and platform::DeviceContext() should set
      // correctly
      (output->Slice<float>(j, j + 1))
@@ -77,7 +75,7 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
  }
}

void LinkMemories(const std::vector<Scope*>& scopes,
                  const std::vector<rnn::MemoryAttr>& memories,
                  size_t step_id,
                  int offset) {
@@ -94,17 +92,17 @@ void LinkMemories(std::vector<std::shared_ptr<Scope>>& scopes,
                 offset,
                 scopes.size(),
                 step_id);
  auto scope = scopes[step_id];
  auto linked_scope = scopes[step_id + offset];
  for (auto& attr : memories) {
    auto mem = scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
    // maybe share variable is better?
    auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<Tensor>();
    mem->ShareDataWith<float>(*linked_mem);

    // TODO(qingqing) remove following code
    // the memory of current step should be allocated in step net
    auto m = scope->NewVar(attr.var)->GetMutable<Tensor>();
    // for unit test, as addOp and mulOp are null currently, if not
    // mutable_data, mem.data() in output will be error. We will
    // remove this line after merge the correct addOp and mulOp.
@@ -171,8 +169,8 @@ void InitArgument(const ArgumentName& name,
}  // namespace rnn

void RecurrentAlgorithm::InferShape(const Scope& scope) const {
  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  CreateScopes(scope);
@@ -187,10 +185,10 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
  InitMemories(step_scopes[0]);

  PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr,
                 "stepnet [%s] is not in scope.",
                 arg_->step_net);
  Variable* net = scope.FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  // If the InferShape is called in OperatorBase's run function,
  // the rnn op only needs to do InferShape for the first time step
@@ -198,82 +196,79 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
    if (i > 0) {
      rnn::LinkMemories(step_scopes, arg_->memories, i, -1);
    }
    net->GetMutable<NetOp>()->InferShape(*step_scopes[i]);
  }

  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // now only support fixed length
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
}

void RecurrentAlgorithm::Run(const Scope& scope,
                             const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);

  Variable* net = scope.FindVar(arg_->step_net);
  for (size_t step_id = 0; step_id < seq_len_; step_id++) {
    // the link memory is done in InferShape
    // maybe remove following code after testing
    if (step_id > 0) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1);
    }
    net->GetMutable<NetOp>()->Run(*step_scopes[step_id], dev_ctx);
  }

  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
}

void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
  // TODO(xxx) Only two scopes are needed for inference, this case will be
  // supported later.
  auto step_scopes =
      scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();

  if (seq_len_ > step_scopes->size()) {
    for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
      auto& step_scope = scope.NewScope();

      // Now all variables in scope must be created outside of op.
      auto net_op = scope.FindVar(arg_->step_net)->GetMutable<NetOp>();
      for (auto& input : net_op->inputs_) {
        if (!step_scope.FindVar(input)) step_scope.NewVar(input);
      }
      for (auto& output : net_op->outputs_) {
        step_scope.NewVar(output);
      }

      step_scopes->emplace_back(&step_scope);
    }
  }
}

void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
  for (auto& attr : arg_->memories) {
    Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                   "memory [%s]'s boot variable [%s] not exists",
                   attr.var,
                   attr.boot_var);
    Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
    pre_mem->ShareDataWith<float>(*boot_mem);

    // TODO(qingqing) remove following code
    // the memory of current step should be allocated in step net
    // here for unit test
    auto cur_step_mem = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    cur_step_mem->mutable_data<float>(boot_mem->dims(), platform::CPUPlace());
  }
}
@@ -333,72 +328,69 @@ public:
};

void RecurrentGradientAlgorithm::Run(
    const Scope& scope, const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);

  PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr,
                 "step net is not in scope.");
  Variable* net = scope.FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");

  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
    }
    net->GetMutable<NetOp>()->Run(*step_scopes[step_id], dev_ctx);
  }
  LinkBootMemoryGradients(step_scopes[0]);
  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
}

void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
    Scope* step_scope) const {
  for (auto& attr : arg_->memories) {
    Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    PADDLE_ENFORCE(mem_grad != nullptr,
                   "boot_tensor should be retrieved before");

    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                   "memory [%s]'s boot variable [%s] not exists",
                   attr.var,
                   attr.boot_var);
    Tensor* boot_mem_grad =
        step_scope->NewVar(attr.boot_var)->GetMutable<Tensor>();
    boot_mem_grad->ShareDataWith<float>(*mem_grad);
  }
}

void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
  seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);

  PADDLE_ENFORCE(scope.FindVar(arg_->step_net) != nullptr,
                 "step net is not in scope.");
  Variable* net = scope.FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");

  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
    }
    net->GetMutable<NetOp>()->InferShape(*step_scopes[step_id]);
  }

  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // now only support fixed length
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
  LinkBootMemoryGradients(step_scopes[0]);
@@ -70,18 +70,18 @@ struct ArgumentName {
/**
 * Prepare inputs for each step net.
 */
void SegmentInputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& inlinks,
                   const size_t seq_len);

/**
 * Process outputs of step nets and merge to variables.
 */
void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& outlinks,
                   const size_t seq_len);

void LinkMemories(const std::vector<Scope*>& step_scopes,
                  const std::vector<MemoryAttr>& memories,
                  size_t step_id,
                  int offset);
@@ -100,15 +100,14 @@ void InitArgument(const ArgumentName& name, Argument* arg);
class RecurrentAlgorithm {
public:
  void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const;

  void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const Scope& scope) const;

protected:
  /*
@@ -117,15 +116,13 @@ protected:
   * NOTE the scopes are reused in both the forward and backward, so just
   * create once and expand its size if more steps need.
   */
  void CreateScopes(const Scope& scope) const;

  const std::vector<Scope*>& GetStepScopes(const Scope& scope) const {
    return *scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
  }

  void InitMemories(Scope* step_scopes) const;

private:
  std::unique_ptr<rnn::Argument> arg_;
@@ -146,21 +143,18 @@ class RecurrentGradientAlgorithm {
public:
  void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

  void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const;

  void LinkBootMemoryGradients(Scope* step_scopes) const;

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const Scope& scope) const;

protected:
  inline const std::vector<Scope*>& GetStepScopes(const Scope& scope) const {
    return *scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
  }

private:
@@ -175,11 +169,11 @@ public:
  /**
   * InferShape must be called before Run.
   */
  virtual void InferShape(const Scope& scope) const override {
    alg_.InferShape(scope);
  }

  virtual void Run(const Scope& scope,
                   const platform::DeviceContext& dev_ctx) const override {
    alg_.Run(scope, dev_ctx);
  }
@@ -197,11 +191,11 @@ public:
  /**
   * InferShape must be called before Run.
   */
  virtual void InferShape(const Scope& scope) const override {
    alg_.InferShape(scope);
  }

  virtual void Run(const Scope& scope,
                   const platform::DeviceContext& dev_ctx) const override {
    alg_.Run(scope, dev_ctx);
  }
@@ -34,41 +34,40 @@ protected:
  virtual void TearDown() override {}

  void CreateGlobalVariables() {
    // create input, and init content
    LOG(INFO) << "create global variable x";
    for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
      Variable* x = scope_.NewVar(inlink);
      DDim dims = make_ddim(std::vector<int>{
          10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
      x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    }
    // create output alias just for test
    for (auto inlink : std::vector<std::string>{"h@alias"}) {
      Variable* x = scope_.NewVar(inlink);
      DDim dims =
          make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
      x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    }

    LOG(INFO) << "create global variable w";
    Variable* w = scope_.NewVar("rnn/w");
    w->GetMutable<Tensor>()->mutable_data<float>(
        make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());

    for (auto boot : std::vector<std::string>{"x_boot", "h_boot"}) {
      LOG(INFO) << "create global variable " << boot;
      Variable* h_boot = scope_.NewVar(boot);
      h_boot->GetMutable<Tensor>()->mutable_data<float>(
          make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
          platform::CPUPlace());
    }

    LOG(INFO) << "create variable step_scopes";
    scope_.NewVar("step_scopes");

    LOG(INFO) << "create variable h";
    scope_.NewVar("h");
  }

  void CreateRNNOp() {
@@ -150,7 +149,7 @@ protected:
  void CreateStepNet() {
    LOG(INFO) << "create variable step_net";
    Variable* var = scope_.NewVar("step_net");
    auto net = var->GetMutable<NetOp>();
    // rnn/s is net's input or output?
    net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"};
@@ -164,7 +163,7 @@ protected:
  }

  // father scope
  Scope scope_;
  std::shared_ptr<OperatorBase> rnn_op_;
};
@@ -191,68 +190,64 @@ protected:
  virtual void TearDown() override {}

  void CreateGlobalVariables() {
    // inputs: x
    LOG(INFO) << "create global variable x";
    Variable* x = scope_.NewVar("x");
    DDim dims =
        make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
    x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    // inputs: h_boot
    LOG(INFO) << "create global variable h_boot";
    Variable* h_boot = scope_.NewVar("h_boot");
    h_boot->GetMutable<Tensor>()->mutable_data<float>(
        make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
    // inputs: w
    LOG(INFO) << "create global variable w";
    Variable* w = scope_.NewVar("rnn/w");
    w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
                                                 platform::CPUPlace());
    // inputs: h_grad
    LOG(INFO) << "create variable h_grad";
    Variable* dh = scope_.NewVar("h_grad");
    dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
                                                  platform::CPUPlace());
    // inputs: step_scopes
    LOG(INFO) << "create variable step_scopes";
    scope_.NewVar("step_scopes");
    // inputs: step_net
    LOG(INFO) << "create variable step_net";
    scope_.NewVar("step_net");
    // outputs: w_grad
    LOG(INFO) << "create global variable w_grad";
scope_->CreateVariable("rnn/w_grad"); scope_.NewVar("rnn/w_grad");
// outputs: x_grad // outputs: x_grad
LOG(INFO) << "create global variable x_grad"; LOG(INFO) << "create global variable x_grad";
scope_->CreateVariable("x_grad"); scope_.NewVar("x_grad");
// outputs: h_boot_grad // outputs: h_boot_grad
LOG(INFO) << "create global variable h_boot_grad"; LOG(INFO) << "create global variable h_boot_grad";
scope_->CreateVariable("h_boot_grad"); scope_.NewVar("h_boot_grad");
} }
void CreateStepScopes() { void CreateStepScopes() {
std::vector<std::shared_ptr<Scope>>* step_scopes = auto step_scopes =
scope_->GetVariable("step_scopes") scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
->GetMutable<std::vector<std::shared_ptr<Scope>>>();
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
auto scope = std::make_shared<Scope>(scope_); auto& scope = scope_.NewScope();
auto pre_t = scope->CreateVariable("rnn/pre_h")->GetMutable<Tensor>(); auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
pre_t->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace()); pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
auto tensor = scope->CreateVariable("rnn/h")->GetMutable<Tensor>(); auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
tensor->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace()); tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
// for unit test of ConcatOutputs // for unit test of ConcatOutputs
auto xg = scope->CreateVariable("rnn/x_grad")->GetMutable<Tensor>(); auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
xg->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace()); xg->mutable_data<float>({20, 30}, platform::CPUPlace());
step_scopes->push_back(scope); step_scopes->emplace_back(&scope);
} }
// last time step // last time step
auto g = (*step_scopes)[9] auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
->CreateVariable("rnn/h_pre_grad") g->mutable_data<float>({20, 30}, platform::CPUPlace());
->GetMutable<Tensor>();
g->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
} }
void CreateRNNGradientAlgorithm() { void CreateRNNGradientAlgorithm() {
...@@ -280,7 +275,7 @@ protected: ...@@ -280,7 +275,7 @@ protected:
void CreateStepNet() { void CreateStepNet() {
LOG(INFO) << "create variable step_net"; LOG(INFO) << "create variable step_net";
Variable* var = scope_->CreateVariable("step_net"); Variable* var = scope_.NewVar("step_net");
auto net = var->GetMutable<NetOp>(); auto net = var->GetMutable<NetOp>();
net->AddOp(OpRegistry::CreateOp("mul", net->AddOp(OpRegistry::CreateOp("mul",
{"rnn/h_pre", "rnn/w", "rnn/s_grad"}, {"rnn/h_pre", "rnn/w", "rnn/s_grad"},
...@@ -300,9 +295,8 @@ protected: ...@@ -300,9 +295,8 @@ protected:
rnn::Link inlink; rnn::Link inlink;
inlink.external = "x"; inlink.external = "x";
inlink.internal = "rnn/x"; inlink.internal = "rnn/x";
std::vector<std::shared_ptr<Scope>>* step_scopes = auto step_scopes =
scope_->GetVariable("step_scopes") scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
->GetMutable<std::vector<std::shared_ptr<Scope>>>();
rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10); rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10);
} }
...@@ -314,15 +308,14 @@ protected: ...@@ -314,15 +308,14 @@ protected:
mem_attr.boot_var = "boot_h"; mem_attr.boot_var = "boot_h";
std::vector<rnn::MemoryAttr> memories; std::vector<rnn::MemoryAttr> memories;
memories.push_back(mem_attr); memories.push_back(mem_attr);
std::vector<std::shared_ptr<Scope>>* step_scopes = auto step_scopes =
scope_->GetVariable("step_scopes") scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
->GetMutable<std::vector<std::shared_ptr<Scope>>>();
for (int i = 1; i < 10; ++i) { for (int i = 1; i < 10; ++i) {
rnn::LinkMemories(*step_scopes, memories, i, -1); rnn::LinkMemories(*step_scopes, memories, i, -1);
} }
} }
std::shared_ptr<Scope> scope_; Scope scope_;
RecurrentGradientAlgorithm rnn_grad_algo_; RecurrentGradientAlgorithm rnn_grad_algo_;
}; };
...@@ -341,14 +334,14 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -341,14 +334,14 @@ TEST(RecurrentOp, LinkMemories) {
// create and init step scopes // create and init step scopes
int len = 10; int len = 10;
std::vector<std::shared_ptr<Scope>> step_scopes; std::vector<Scope*> step_scopes;
for (int i = 0; i < len; ++i) { for (int i = 0; i < len; ++i) {
auto scope = std::make_shared<Scope>(); auto scope = new Scope();
scope->CreateVariable("pre_h"); scope->NewVar("pre_h");
auto tensor = scope->CreateVariable("h")->GetMutable<Tensor>(); auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
float* data = tensor->mutable_data<float>(make_ddim({15, 20}), CPUPlace()); float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
for (int i = 0; i < 15 * 20; ++i) { for (int j = 0; j < 15 * 20; ++j) {
data[i] = rand() * (1. / (double)RAND_MAX); data[j] = rand() * (1. / (double)RAND_MAX);
} }
step_scopes.push_back(scope); step_scopes.push_back(scope);
} }
...@@ -367,9 +360,9 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -367,9 +360,9 @@ TEST(RecurrentOp, LinkMemories) {
// check // check
for (int i = 0; i < len - 1; ++i) { for (int i = 0; i < len - 1; ++i) {
const float* a = const float* a =
step_scopes[i]->GetVariable("h")->GetMutable<Tensor>()->data<float>(); step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
const float* b = step_scopes[i + 1] const float* b = step_scopes[i + 1]
->GetVariable("pre_h") ->FindVar("pre_h")
->GetMutable<Tensor>() ->GetMutable<Tensor>()
->data<float>(); ->data<float>();
for (size_t i = 0; i < 15 * 20; ++i) { for (size_t i = 0; i < 15 * 20; ++i) {
...@@ -382,19 +375,25 @@ TEST(RecurrentOp, LinkMemories) { ...@@ -382,19 +375,25 @@ TEST(RecurrentOp, LinkMemories) {
} }
// check // check
for (int i = len - 2; i >= 0; --i) { for (int i = len - 2; i >= 0; --i) {
const float* a = step_scopes[i] const float* a =
->GetVariable("pre_h") step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
->GetMutable<Tensor>() const float* b =
->data<float>(); step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
const float* b = step_scopes[i + 1]
->GetVariable("h")
->GetMutable<Tensor>()
->data<float>();
for (size_t i = 0; i < 15 * 20; ++i) { for (size_t i = 0; i < 15 * 20; ++i) {
ASSERT_FLOAT_EQ(a[i], b[i]); ASSERT_FLOAT_EQ(a[i], b[i]);
} }
} }
for (auto s : step_scopes) {
delete s;
}
} }
USE_OP(add_two); USE_OP(add_two);
USE_OP(mul); USE_OP(mul);
// int main() {
// //! TODO(yuyang18): Temporary disable this unit-test because implementation
// //! error.
// return 0;
//}
\ No newline at end of file
...@@ -25,6 +25,10 @@ using OpKernel = framework::OpKernel; ...@@ -25,6 +25,10 @@ using OpKernel = framework::OpKernel;
using InferShapeContext = framework::InferShapeContext; using InferShapeContext = framework::InferShapeContext;
using ExecutionContext = framework::ExecutionContext; using ExecutionContext = framework::ExecutionContext;
using Variable = framework::Variable; using Variable = framework::Variable;
template <typename T,
int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
template <typename T, template <typename T,
int MajorType = Eigen::RowMajor, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <glog/logging.h>
#include <paddle/string/printf.h> #include <paddle/string/printf.h>
#include <sstream> #include <sstream>
#include <stdexcept> #include <stdexcept>
......
...@@ -102,15 +102,18 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -102,15 +102,18 @@ All parameter, weight, gradient are variables in Paddle.
}, },
py::return_value_policy::reference); py::return_value_policy::reference);
py::class_<pd::Scope, std::shared_ptr<pd::Scope>>(m, "Scope") py::class_<pd::Scope>(m, "Scope", "")
.def(py::init<const std::shared_ptr<pd::Scope>&>()) .def("new_var",
.def("get_var", [](pd::Scope& self, const std::string& name) -> pd::Variable* {
&pd::Scope::GetVariable, return self.NewVar(name);
},
py::return_value_policy::reference) py::return_value_policy::reference)
.def("create_var", .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
&pd::Scope::CreateVariable, .def(py::init<>())
.def("new_scope",
[](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
py::return_value_policy::reference) py::return_value_policy::reference)
.def("get_var_name", &pd::Scope::GetVariableName); .def("drop_kids", &pd::Scope::DropKids);
//! @note: Be careful! PyBind will return std::string as an unicode, not //! @note: Be careful! PyBind will return std::string as an unicode, not
//! Python str. If you want a str object, you should cast them in Python. //! Python str. If you want a str object, you should cast them in Python.
......
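The pybind hunk above rebinds `Scope` with a default constructor plus `new_var`, `find_var`, `new_scope`, and `drop_kids`, replacing the old `shared_ptr`-based constructor and `create_var`/`get_var`. A minimal sketch of how the rebound API is expected to be used from Python, assuming `paddle.v2.framework.core` is the module built from these bindings (the same module the tests later in this commit import):

```python
import paddle.v2.framework.core as core

scope = core.Scope()                    # root scope, default-constructed
scope.new_var("w")                      # create a variable in this scope
assert scope.find_var("w") is not None  # find_var returns None only when missing

child = scope.new_scope()               # child scope; lookups fall back to the parent
assert child.find_var("w") is not None

scope.drop_kids()                       # parent releases all child scopes at once
```

With ownership moved into the parent scope, Python code no longer holds `shared_ptr` handles; dropping children explicitly via `drop_kids()` is how local scopes are torn down.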
...@@ -5,7 +5,7 @@ Default scope function. ...@@ -5,7 +5,7 @@ Default scope function.
thread-local stack of Scope. Top of that stack is current scope, the bottom thread-local stack of Scope. Top of that stack is current scope, the bottom
of that stack is all scopes' parent. of that stack is all scopes' parent.
Invoking `create_var/get_var` can `create/get` variable in current scope. Invoking `new_var/find_var` can `new/find` variable in current scope.
Invoking `enter_local_scope/leave_local_scope` can create or destroy local Invoking `enter_local_scope/leave_local_scope` can create or destroy local
scope. scope.
...@@ -19,8 +19,8 @@ import threading ...@@ -19,8 +19,8 @@ import threading
__tl_scope__ = threading.local() __tl_scope__ = threading.local()
__all__ = [ __all__ = [
'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'create_var', 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var',
'get_var', 'scoped_function' 'find_var', 'scoped_function'
] ]
...@@ -33,7 +33,7 @@ def get_cur_scope(): ...@@ -33,7 +33,7 @@ def get_cur_scope():
if cur_scope_stack is None: if cur_scope_stack is None:
__tl_scope__.cur_scope = list() __tl_scope__.cur_scope = list()
if len(__tl_scope__.cur_scope) == 0: if len(__tl_scope__.cur_scope) == 0:
__tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope(None)) __tl_scope__.cur_scope.append(paddle.v2.framework.core.Scope())
return __tl_scope__.cur_scope[-1] return __tl_scope__.cur_scope[-1]
...@@ -42,7 +42,7 @@ def enter_local_scope(): ...@@ -42,7 +42,7 @@ def enter_local_scope():
Enter a new local scope Enter a new local scope
""" """
cur_scope = get_cur_scope() cur_scope = get_cur_scope()
new_scope = paddle.v2.framework.core.Scope(cur_scope) new_scope = cur_scope.new_scope()
__tl_scope__.cur_scope.append(new_scope) __tl_scope__.cur_scope.append(new_scope)
...@@ -51,20 +51,21 @@ def leave_local_scope(): ...@@ -51,20 +51,21 @@ def leave_local_scope():
Leave local scope Leave local scope
""" """
__tl_scope__.cur_scope.pop() __tl_scope__.cur_scope.pop()
get_cur_scope().drop_kids()
def create_var(name): def new_var(name):
""" """
create variable in current scope. create variable in current scope.
""" """
return get_cur_scope().create_var(name) return get_cur_scope().new_var(name)
def get_var(name): def find_var(name):
""" """
get variable in current scope. get variable in current scope.
""" """
return get_cur_scope().get_var(name) return get_cur_scope().find_var(name)
def scoped_function(func): def scoped_function(func):
......
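The docstring above describes a thread-local stack of scopes, with the bottom scope acting as the shared parent. A short sketch of the renamed helpers in use; the absolute module path `paddle.v2.framework.default_scope_funcs` is an assumption based on the package layout, and the behavior follows the unit test further down in this commit:

```python
from paddle.v2.framework.default_scope_funcs import (
    new_var, find_var, enter_local_scope, leave_local_scope)

new_var("w")                        # created in the current (bottom) scope
enter_local_scope()                 # push a child scope onto the thread-local stack
new_var("tmp")                      # visible only while the local scope is alive
assert find_var("w") is not None    # lookups fall back to the parent scope
leave_local_scope()                 # pop the scope; the parent frees it via drop_kids
```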
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
from paddle.v2.framework.create_op_creation_methods import op_creations from paddle.v2.framework.create_op_creation_methods import op_creations
from default_scope_funcs import create_var, get_var, get_cur_scope from default_scope_funcs import new_var, find_var, get_cur_scope
__all__ = ['Network'] # Only expose Network __all__ = ['Network'] # Only expose Network
...@@ -29,12 +29,15 @@ class NetworkFunctor(object): ...@@ -29,12 +29,15 @@ class NetworkFunctor(object):
if ipt in kwargs: if ipt in kwargs:
var = kwargs[ipt] var = kwargs[ipt]
if isinstance(var, basestring): if isinstance(var, basestring):
var = create_var(var) tmp = new_var(var)
self.net.var_names[tmp] = var
var = tmp
if not isinstance(var, core.Variable): if not isinstance(var, core.Variable):
raise TypeError( raise TypeError(
"Input of op creation must be string or variable") "Input of op creation must be string or variable")
kwargs[ipt] = get_cur_scope().get_var_name(var) kwargs[ipt] = self.net.var_names[var]
notemp_outputs = self.func.all_not_temp_output_args notemp_outputs = self.func.all_not_temp_output_args
...@@ -49,17 +52,20 @@ class NetworkFunctor(object): ...@@ -49,17 +52,20 @@ class NetworkFunctor(object):
if opt in kwargs: if opt in kwargs:
var = kwargs[opt] var = kwargs[opt]
if isinstance(var, basestring): if isinstance(var, basestring):
var = create_var(var) tmp = new_var(var)
self.net.var_names[tmp] = var
var = tmp
if not isinstance(var, core.Variable): if not isinstance(var, core.Variable):
raise TypeError( raise TypeError(
"Output of op creation must be string or variable") "Output of op creation must be string or variable")
kwargs[opt] = get_cur_scope().get_var_name(var) kwargs[opt] = self.net.var_names[var]
op = self.func(**kwargs) op = self.func(**kwargs)
self.net.net.add_op(op) self.net.net.add_op(op)
lst = [get_var(kwargs[opt]) for opt in notemp_outputs] lst = [find_var(kwargs[opt]) for opt in notemp_outputs]
if len(lst) == 1: if len(lst) == 1:
return lst[0] return lst[0]
elif len(lst) == 0: elif len(lst) == 0:
...@@ -89,6 +95,7 @@ class Network(object): ...@@ -89,6 +95,7 @@ class Network(object):
self.net = core.Net.create() self.net = core.Net.create()
funcs = (func_name for func_name in dir(op_creations) funcs = (func_name for func_name in dir(op_creations)
if not func_name.startswith("__")) if not func_name.startswith("__"))
self.var_names = dict()
# TODO(yuyang18): This code can work, but do not generate a good # TODO(yuyang18): This code can work, but do not generate a good
# docstring, try to give a better way generate function in runtime # docstring, try to give a better way generate function in runtime
......
...@@ -24,13 +24,13 @@ class OpTestMeta(type): ...@@ -24,13 +24,13 @@ class OpTestMeta(type):
func = getattr(creation.op_creations, self.type, None) func = getattr(creation.op_creations, self.type, None)
self.assertIsNotNone(func) self.assertIsNotNone(func)
scope = core.Scope(None) scope = core.Scope()
kwargs = dict() kwargs = dict()
for in_name in func.all_input_args: for in_name in func.all_input_args:
if hasattr(self, in_name): if hasattr(self, in_name):
kwargs[in_name] = in_name kwargs[in_name] = in_name
var = scope.create_var(in_name).get_tensor() var = scope.new_var(in_name).get_tensor()
arr = getattr(self, in_name) arr = getattr(self, in_name)
var.set_dims(arr.shape) var.set_dims(arr.shape)
var.set(arr) var.set(arr)
...@@ -40,7 +40,7 @@ class OpTestMeta(type): ...@@ -40,7 +40,7 @@ class OpTestMeta(type):
for out_name in func.all_output_args: for out_name in func.all_output_args:
if hasattr(self, out_name): if hasattr(self, out_name):
kwargs[out_name] = out_name kwargs[out_name] = out_name
scope.create_var(out_name).get_tensor() scope.new_var(out_name).get_tensor()
for attr_name in func.all_attr_args: for attr_name in func.all_attr_args:
if hasattr(self, attr_name): if hasattr(self, attr_name):
...@@ -54,7 +54,7 @@ class OpTestMeta(type): ...@@ -54,7 +54,7 @@ class OpTestMeta(type):
op.run(scope, ctx) op.run(scope, ctx)
for out_name in func.all_output_args: for out_name in func.all_output_args:
actual = numpy.array(scope.get_var(out_name).get_tensor()) actual = numpy.array(scope.find_var(out_name).get_tensor())
expect = getattr(self, out_name) expect = getattr(self, out_name)
# TODO(qijun) The default decimal is 7, but numpy.dot and eigen.mul # TODO(qijun) The default decimal is 7, but numpy.dot and eigen.mul
# has some diff, and could not pass unittest. So I set decimal 3 here. # has some diff, and could not pass unittest. So I set decimal 3 here.
......
...@@ -7,19 +7,19 @@ class TestDefaultScopeFuncs(unittest.TestCase): ...@@ -7,19 +7,19 @@ class TestDefaultScopeFuncs(unittest.TestCase):
self.assertIsNotNone(get_cur_scope()) self.assertIsNotNone(get_cur_scope())
def test_none_variable(self): def test_none_variable(self):
self.assertIsNone(get_var("test")) self.assertIsNone(find_var("test"))
def test_create_var_get_var(self): def test_create_var_get_var(self):
var_a = create_var("var_a") var_a = new_var("var_a")
self.assertIsNotNone(var_a) self.assertIsNotNone(var_a)
self.assertIsNotNone(get_cur_scope().get_var('var_a')) self.assertIsNotNone(get_cur_scope().find_var('var_a'))
enter_local_scope() enter_local_scope()
self.assertIsNotNone(get_cur_scope().get_var('var_a')) self.assertIsNotNone(get_cur_scope().find_var('var_a'))
leave_local_scope() leave_local_scope()
def test_var_get_int(self): def test_var_get_int(self):
def __new_scope__(): def __new_scope__():
i = create_var("var_i") i = new_var("var_i")
self.assertFalse(i.is_int()) self.assertFalse(i.is_int())
i.set_int(10) i.set_int(10)
self.assertTrue(i.is_int()) self.assertTrue(i.is_int())
......
...@@ -6,13 +6,13 @@ import paddle.v2.framework.create_op_creation_methods as creation ...@@ -6,13 +6,13 @@ import paddle.v2.framework.create_op_creation_methods as creation
class TestFc(unittest.TestCase): class TestFc(unittest.TestCase):
def test_fc(self): def test_fc(self):
scope = core.Scope(None) scope = core.Scope()
x = scope.create_var("X") x = scope.new_var("X")
x_tensor = x.get_tensor() x_tensor = x.get_tensor()
x_tensor.set_dims([1000, 784]) x_tensor.set_dims([1000, 784])
x_tensor.alloc_float() x_tensor.alloc_float()
w = scope.create_var("W") w = scope.new_var("W")
w_tensor = w.get_tensor() w_tensor = w.get_tensor()
w_tensor.set_dims([784, 100]) w_tensor.set_dims([784, 100])
w_tensor.alloc_float() w_tensor.alloc_float()
...@@ -25,10 +25,10 @@ class TestFc(unittest.TestCase): ...@@ -25,10 +25,10 @@ class TestFc(unittest.TestCase):
op = creation.op_creations.fc(X="X", Y="Y", W="W") op = creation.op_creations.fc(X="X", Y="Y", W="W")
for out in op.outputs(): for out in op.outputs():
if scope.get_var(out) is None: if scope.find_var(out) is None:
scope.create_var(out).get_tensor() scope.new_var(out).get_tensor()
tensor = scope.get_var("Y").get_tensor() tensor = scope.find_var("Y").get_tensor()
op.infer_shape(scope) op.infer_shape(scope)
self.assertEqual([1000, 100], tensor.shape()) self.assertEqual([1000, 100], tensor.shape())
......
...@@ -5,29 +5,29 @@ import unittest ...@@ -5,29 +5,29 @@ import unittest
class TestScope(unittest.TestCase): class TestScope(unittest.TestCase):
def test_create_destroy(self): def test_create_destroy(self):
paddle_c = paddle.v2.framework.core paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope(None) scope = paddle_c.Scope()
self.assertIsNotNone(scope) self.assertIsNotNone(scope)
scope_with_parent = paddle_c.Scope(scope) scope_with_parent = scope.new_scope()
self.assertIsNotNone(scope_with_parent) self.assertIsNotNone(scope_with_parent)
def test_none_variable(self): def test_none_variable(self):
paddle_c = paddle.v2.framework.core paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope(None) scope = paddle_c.Scope()
self.assertIsNone(scope.get_var("test")) self.assertIsNone(scope.find_var("test"))
def test_create_var_get_var(self): def test_create_var_get_var(self):
paddle_c = paddle.v2.framework.core paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope(None) scope = paddle_c.Scope()
var_a = scope.create_var("var_a") var_a = scope.new_var("var_a")
self.assertIsNotNone(var_a) self.assertIsNotNone(var_a)
self.assertIsNotNone(scope.get_var('var_a')) self.assertIsNotNone(scope.find_var('var_a'))
scope2 = paddle_c.Scope(scope) scope2 = scope.new_scope()
self.assertIsNotNone(scope2.get_var('var_a')) self.assertIsNotNone(scope2.find_var('var_a'))
def test_var_get_int(self): def test_var_get_int(self):
paddle_c = paddle.v2.framework.core paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope(None) scope = paddle_c.Scope()
var = scope.create_var("test_int") var = scope.new_var("test_int")
var.set_int(10) var.set_int(10)
self.assertTrue(var.is_int()) self.assertTrue(var.is_int())
self.assertEqual(10, var.get_int()) self.assertEqual(10, var.get_int())
......
...@@ -5,8 +5,8 @@ import numpy ...@@ -5,8 +5,8 @@ import numpy
class TestScope(unittest.TestCase): class TestScope(unittest.TestCase):
def test_int_tensor(self): def test_int_tensor(self):
scope = core.Scope(None) scope = core.Scope()
var = scope.create_var("test_tensor") var = scope.new_var("test_tensor")
tensor = var.get_tensor() tensor = var.get_tensor()
tensor.set_dims([1000, 784]) tensor.set_dims([1000, 784])
...@@ -23,8 +23,8 @@ class TestScope(unittest.TestCase): ...@@ -23,8 +23,8 @@ class TestScope(unittest.TestCase):
self.assertEqual(2.0, tensor_array_2[19, 11]) self.assertEqual(2.0, tensor_array_2[19, 11])
def test_float_tensor(self): def test_float_tensor(self):
scope = core.Scope(None) scope = core.Scope()
var = scope.create_var("test_tensor") var = scope.new_var("test_tensor")
tensor = var.get_tensor() tensor = var.get_tensor()
tensor.set_dims([1000, 784]) tensor.set_dims([1000, 784])
......