Commit ae3a9c48, authored by Yi Wang

Add throw_on_error specialization for T*; Update all usages of Scope

Parent commit: f3ff790b
@@ -37,8 +37,8 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
 ```cpp
 class Scope {
  public:
-  Variable* CreateVariable(const std::string& name);
-  const Variable* GetVariable(const std::string& name) const;
+  Variable* NewVar(const std::string& name);
+  const Variable* FindVar(const std::string& name) const;

  private:
   std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
@@ -58,12 +58,12 @@ class Scope {
  public:
   Scope(const std::shared_ptr<Scope>& scope): parent_(scope) {}

-  Variable* GetVariable(const std::string& name) const {
+  Variable* FindVar(const std::string& name) const {
     auto it = vars_.find(name);
     if (it != vars_.end()) {
       return it->second.get();
     } else if (parent_ != nullptr) {
-      return parent_->GetVariable(name);
+      return parent_->FindVar(name);
     } else {
       return nullptr;
     }
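The recursive lookup above means a child scope falls through to its parent when a name is not found locally. As a usage sketch (assuming the `Create()` factory shown in the next hunk; the variable names are illustrative):

```cpp
void ParentLookupExample() {
  auto parent = Scope::Create();
  parent->NewVar("W");                      // created in the parent scope

  auto child = Scope::Create(parent);       // child keeps a shared_ptr to its parent
  Variable* w = child->FindVar("W");        // found by walking up the parent chain
  Variable* missing = child->FindVar("b");  // nullptr: in neither scope
  (void)w;
  (void)missing;
}
```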
@@ -95,10 +95,10 @@ class Scope {
   static std::shared_ptr<Scope> Create(const std::shared_ptr<Scope>& parent = nullptr);

   // return nullptr if not found.
-  Variable* GetVariable(const std::string& name) const;
+  Variable* FindVar(const std::string& name) const;

   // return if already contains same name variable.
-  Variable* CreateVariable(const std::string& name);
+  Variable* NewVar(const std::string& name);

  private:
   std::shared_ptr<Scope> parent_;
@@ -107,11 +107,11 @@ class Scope {
```
## Only scope can create a variable

-To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `CreateVariable` can construct `Variable`.
+To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `NewVar` can construct `Variable`.
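To make the friend-class arrangement concrete, here is a minimal sketch; apart from the names `Scope`, `Variable`, and `NewVar` already used above, everything is illustrative, and error handling for name conflicts is omitted:

```cpp
#include <memory>
#include <string>
#include <unordered_map>

class Scope;  // forward declaration so Variable can name its friend

class Variable {
 private:
  Variable() = default;  // private: nobody outside can construct one
  friend class Scope;    // ... except Scope, via NewVar below
};

class Scope {
 public:
  // The only code path that reaches Variable's private constructor.
  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (slot == nullptr) slot.reset(new Variable());
    return slot.get();
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
};
```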
## When scope destroyed, all variables inside this scope should be destroyed together

-The scope hold unique pointers for all variables. User can `GetVariable` from scope, but he should not hold this pointer as a member variable. Because when scope is destroyed, all variables inside this scope will be destroyed together.
+The scope holds unique pointers to all of its variables. A user can `FindVar` from the scope, but should not keep the returned pointer as a member variable, because when the scope is destroyed, all variables inside it are destroyed together.
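A short usage sketch of the lifetime rule above (assuming the `Create()` factory from the class declaration; the variable name is illustrative):

```cpp
void LifetimeExample() {
  Variable* dangling = nullptr;
  {
    auto scope = Scope::Create();
    Variable* v = scope->NewVar("tmp");  // owned by the scope's unique_ptr map
    dangling = v;                        // do NOT store the pointer like this
  }  // scope destroyed here, and every Variable it owns dies with it
  // `dangling` now points at freed memory; dereferencing it is undefined.
}
```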
## Sharing a parent scope

@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
## Orthogonal interface

-`GetVariable` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `CreateVariable` will return a `Error` when there is a name conflict locally. Combine `GetVariable` and `CreateVariable`, we can implement `CreateOrGetVariable` easily.
+`FindVar` will return `nullptr` when `name` is not found, so it can also serve as a `Contains` check. `NewVar` will return an `Error` when there is a name conflict locally. Combining `FindVar` and `NewVar`, we can easily implement a find-or-create helper.
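Such a find-or-create helper composed from the two orthogonal calls might look like this minimal sketch (the name `FindOrCreateVar` is an illustrative assumption, not part of the committed API):

```cpp
// Compose the two orthogonal operations: look up first, create locally if absent.
Variable* FindOrCreateVar(Scope* scope, const std::string& name) {
  if (Variable* var = scope->FindVar(name)) {
    return var;  // already exists in this scope or an ancestor
  }
  return scope->NewVar(name);  // otherwise create it in this scope
}
```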
@@ -33,15 +33,14 @@ void SegmentInputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
  PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
  for (size_t i = 0; i < inlinks.size(); ++i) {
    Tensor* input =
-        step_scopes[0]->GetVariable(inlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(inlinks[i].external)->GetMutable<Tensor>();
    DDim dims = input->dims();
    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                   "all the inlinks must have same length");
    DDim step_dims = slice_ddim(dims, 1, dims.size());
    for (size_t j = 0; j < seq_len; j++) {
-      Tensor* step_input = step_scopes[j]
-                               ->CreateVariable(inlinks[i].internal)
-                               ->GetMutable<Tensor>();
+      Tensor* step_input =
+          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
      *step_input = input->Slice<float>(j, j + 1);
      step_input->Resize(step_dims);
    }
@@ -53,12 +52,12 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                   const size_t seq_len) {
  for (size_t i = 0; i < outlinks.size(); i++) {
    Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    // TODO(qingiqng) remove following code after adding
    // InferShape in RecurrentGradientOp
    DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
@@ -66,9 +65,8 @@ void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
    output->mutable_data<float>(make_ddim(dims_vec), platform::CPUPlace());
    for (size_t j = 0; j < seq_len; j++) {
-      Tensor* step_output = step_scopes[j]
-                                ->GetVariable(outlinks[i].internal)
-                                ->GetMutable<Tensor>();
+      Tensor* step_output =
+          step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable<Tensor>();
      // TODO(luotao02) data type and platform::DeviceContext() should set
      // correctly
      (output->Slice<float>(j, j + 1))
@@ -97,14 +95,14 @@ void LinkMemories(std::vector<std::shared_ptr<Scope>>& scopes,
  std::shared_ptr<Scope> scope = scopes[step_id];
  std::shared_ptr<Scope> linked_scope = scopes[step_id + offset];
  for (auto& attr : memories) {
-    auto mem = scope->CreateVariable(attr.pre_var)->GetMutable<Tensor>();
+    auto mem = scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
    // maybe share variable is better?
-    auto linked_mem = linked_scope->GetVariable(attr.var)->GetMutable<Tensor>();
+    auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<Tensor>();
    mem->ShareDataWith<float>(*linked_mem);
    // TODO(qingqing) remove following code
    // the memory of current step should be allocated in step net
-    auto m = scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    auto m = scope->NewVar(attr.var)->GetMutable<Tensor>();
    // for unit test, as addOp and mulOp are null currently, if not
    // mutable_data, mem.data() in output will be error. We will
    // remove this line after merge the correct addOp and mulOp.
@@ -172,7 +170,7 @@ void InitArgument(const ArgumentName& name,
}  // namespace rnn

void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
-  seq_len_ = scope->GetVariable((arg_->inlinks[0]).external)
+  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  CreateScopes(scope);
@@ -187,10 +185,10 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
  InitMemories(step_scopes[0]);

-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net),
                 "stepnet [%s] is not in scope.",
                 arg_->step_net);
-  Variable* net = scope->GetVariable(arg_->step_net);
+  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  // If the InferShape is called in OperatorBase's run function,
  // the rnn op only needs to do InferShape for the first time step
@@ -204,14 +202,14 @@ void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // now only support fixed length
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
}
@@ -220,7 +218,7 @@ void RecurrentAlgorithm::Run(const std::shared_ptr<Scope>& scope,
                             const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);

-  Variable* net = scope->GetVariable(arg_->step_net);
+  Variable* net = scope->FindVar(arg_->step_net);
  for (size_t step_id = 0; step_id < seq_len_; step_id++) {
    // the link memory is done in InferShape
    // maybe remove following code after testing
@@ -236,7 +234,7 @@ void RecurrentAlgorithm::Run(const std::shared_ptr<Scope>& scope,

void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
  // TODO(xxx) Only two scopes are needed for inference, this case will be
  // supported later.
-  auto step_scopes = scope->GetVariable(arg_->step_scopes)
+  auto step_scopes = scope->FindVar(arg_->step_scopes)
                         ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
  if (seq_len_ > step_scopes->size()) {
@@ -244,12 +242,12 @@ void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
      std::shared_ptr<Scope> step_scope = std::make_shared<Scope>(scope);

      // Now all variables in scope must be created outside of op.
-      auto net_op = scope->GetVariable(arg_->step_net)->GetMutable<NetOp>();
+      auto net_op = scope->FindVar(arg_->step_net)->GetMutable<NetOp>();
      for (auto& input : net_op->inputs_) {
-        step_scope->CreateVariable(input);
+        step_scope->NewVar(input);
      }
      for (auto& output : net_op->outputs_) {
-        step_scope->CreateVariable(output);
+        step_scope->NewVar(output);
      }

      step_scopes->push_back(std::make_shared<Scope>(step_scope));
@@ -259,21 +257,18 @@ void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {

void RecurrentAlgorithm::InitMemories(std::shared_ptr<Scope> step_scope) const {
  for (auto& attr : arg_->memories) {
-    Tensor* pre_mem =
-        step_scope->CreateVariable(attr.pre_var)->GetMutable<Tensor>();
-    PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var),
+    Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
+    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                   "memory [%s]'s boot variable [%s] not exists",
                   attr.var,
                   attr.boot_var);
-    Tensor* boot_mem =
-        step_scope->GetVariable(attr.boot_var)->GetMutable<Tensor>();
+    Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
    pre_mem->ShareDataWith<float>(*boot_mem);
    // TODO(qingqing) remove following code
    // the memory of current step should be allocated in step net
    // here for unit test
-    auto cur_step_mem =
-        step_scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    auto cur_step_mem = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    cur_step_mem->mutable_data<float>(boot_mem->dims(), platform::CPUPlace());
  }
}
@@ -337,9 +332,8 @@ void RecurrentGradientAlgorithm::Run(
    const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
-                 "step net is not in scope.");
-  Variable* net = scope->GetVariable(arg_->step_net);
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
+  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
@@ -354,31 +348,29 @@ void RecurrentGradientAlgorithm::Run(

void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
    std::shared_ptr<Scope> step_scope) const {
  for (auto& attr : arg_->memories) {
-    Tensor* mem_grad =
-        step_scope->CreateVariable(attr.var)->GetMutable<Tensor>();
+    Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    PADDLE_ENFORCE(mem_grad != nullptr,
                   "boot_tensor should be retrieved before");
-    PADDLE_ENFORCE(step_scope->HasVariable(attr.boot_var),
+    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                   "memory [%s]'s boot variable [%s] not exists",
                   attr.var,
                   attr.boot_var);
    Tensor* boot_mem_grad =
-        step_scope->CreateVariable(attr.boot_var)->GetMutable<Tensor>();
+        step_scope->NewVar(attr.boot_var)->GetMutable<Tensor>();
    boot_mem_grad->ShareDataWith<float>(*mem_grad);
  }
}

void RecurrentGradientAlgorithm::InferShape(
    const std::shared_ptr<Scope>& scope) const {
-  seq_len_ = scope->GetVariable((arg_->inlinks[0]).external)
+  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
-  PADDLE_ENFORCE(scope->HasVariable(arg_->step_net),
-                 "step net is not in scope.");
-  Variable* net = scope->GetVariable(arg_->step_net);
+  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
+  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
@@ -391,14 +383,14 @@ void RecurrentGradientAlgorithm::InferShape(
  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
-                         ->GetVariable(outlinks[i].internal)
+                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // now only support fixed length
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
-        step_scopes[0]->GetVariable(outlinks[i].external)->GetMutable<Tensor>();
+        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
  LinkBootMemoryGradients(step_scopes[0]);
......
@@ -121,7 +121,7 @@ protected:
  inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
      std::shared_ptr<Scope> scope) const {
-    return *(scope->GetVariable(arg_->step_scopes))
+    return *(scope->FindVar(arg_->step_scopes))
                ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
  }
@@ -159,7 +159,7 @@ public:
protected:
  inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
      std::shared_ptr<Scope> scope) const {
-    return *(scope->GetVariable(arg_->step_scopes))
+    return *(scope->FindVar(arg_->step_scopes))
                ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
  }
......
@@ -38,37 +38,37 @@ protected:
    // create input, and init content
    LOG(INFO) << "create global variable x";
    for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
-      Variable* x = scope_->CreateVariable(inlink);
+      Variable* x = scope_->NewVar(inlink);
      DDim dims = make_ddim(std::vector<int>{
          10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
      x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    }
    // create output alias just for test
    for (auto inlink : std::vector<std::string>{"h@alias"}) {
-      Variable* x = scope_->CreateVariable(inlink);
+      Variable* x = scope_->NewVar(inlink);
      DDim dims =
          make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
      x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    }

    LOG(INFO) << "create global variable w";
-    Variable* w = scope_->CreateVariable("rnn/w");
+    Variable* w = scope_->NewVar("rnn/w");
    w->GetMutable<Tensor>()->mutable_data<float>(
        make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());

    for (auto boot : std::vector<std::string>{"x_boot", "h_boot"}) {
      LOG(INFO) << "create global variable " << boot;
-      Variable* h_boot = scope_->CreateVariable(boot);
+      Variable* h_boot = scope_->NewVar(boot);
      h_boot->GetMutable<Tensor>()->mutable_data<float>(
          make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
          platform::CPUPlace());
    }

    LOG(INFO) << "create variable step_scopes";
-    scope_->CreateVariable("step_scopes");
+    scope_->NewVar("step_scopes");

    LOG(INFO) << "create variable h";
-    scope_->CreateVariable("h");
+    scope_->NewVar("h");
  }
  void CreateRNNOp() {
@@ -150,7 +150,7 @@ protected:

  void CreateStepNet() {
    LOG(INFO) << "create variable step_net";
-    Variable* var = scope_->CreateVariable("step_net");
+    Variable* var = scope_->NewVar("step_net");
    auto net = var->GetMutable<NetOp>();
    // rnn/s is net's input or output?
    net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"};
@@ -194,64 +194,62 @@ protected:
    scope_ = std::make_shared<Scope>();
    // inputs: x
    LOG(INFO) << "create global variable x";
-    Variable* x = scope_->CreateVariable("x");
+    Variable* x = scope_->NewVar("x");
    DDim dims =
        make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
    x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
    // inputs: h_boot
    LOG(INFO) << "create global variable h_boot";
-    Variable* h_boot = scope_->CreateVariable("h_boot");
+    Variable* h_boot = scope_->NewVar("h_boot");
    h_boot->GetMutable<Tensor>()->mutable_data<float>(
        make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
    // inputs: w
    LOG(INFO) << "create global variable w";
-    Variable* w = scope_->CreateVariable("rnn/w");
+    Variable* w = scope_->NewVar("rnn/w");
    w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
                                                 platform::CPUPlace());
    // inputs: h_grad
    LOG(INFO) << "create variable h_grad";
-    Variable* dh = scope_->CreateVariable("h_grad");
+    Variable* dh = scope_->NewVar("h_grad");
    dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
                                                  platform::CPUPlace());
    // inputs: step_scopes
    LOG(INFO) << "create variable step_scopes";
-    scope_->CreateVariable("step_scopes");
+    scope_->NewVar("step_scopes");
    // inputs: step_net
    LOG(INFO) << "create variable step_net";
-    scope_->CreateVariable("step_net");
+    scope_->NewVar("step_net");
    // outputs: w_grad
    LOG(INFO) << "create global variable w_grad";
-    scope_->CreateVariable("rnn/w_grad");
+    scope_->NewVar("rnn/w_grad");
    // outputs: x_grad
    LOG(INFO) << "create global variable x_grad";
-    scope_->CreateVariable("x_grad");
+    scope_->NewVar("x_grad");
    // outputs: h_boot_grad
    LOG(INFO) << "create global variable h_boot_grad";
-    scope_->CreateVariable("h_boot_grad");
+    scope_->NewVar("h_boot_grad");
  }

  void CreateStepScopes() {
    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->GetVariable("step_scopes")
+        scope_->FindVar("step_scopes")
            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
    for (int i = 0; i < 10; ++i) {
      auto scope = std::make_shared<Scope>(scope_);
-      auto pre_t = scope->CreateVariable("rnn/pre_h")->GetMutable<Tensor>();
+      auto pre_t = scope->NewVar("rnn/pre_h")->GetMutable<Tensor>();
      pre_t->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
-      auto tensor = scope->CreateVariable("rnn/h")->GetMutable<Tensor>();
+      auto tensor = scope->NewVar("rnn/h")->GetMutable<Tensor>();
      tensor->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
      // for unit test of ConcatOutputs
-      auto xg = scope->CreateVariable("rnn/x_grad")->GetMutable<Tensor>();
+      auto xg = scope->NewVar("rnn/x_grad")->GetMutable<Tensor>();
      xg->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
      step_scopes->push_back(scope);
    }
    // last time step
-    auto g = (*step_scopes)[9]
-                 ->CreateVariable("rnn/h_pre_grad")
-                 ->GetMutable<Tensor>();
+    auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
    g->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
  }
@@ -280,7 +278,7 @@ protected:

  void CreateStepNet() {
    LOG(INFO) << "create variable step_net";
-    Variable* var = scope_->CreateVariable("step_net");
+    Variable* var = scope_->NewVar("step_net");
    auto net = var->GetMutable<NetOp>();
    net->AddOp(OpRegistry::CreateOp("mul",
                                    {"rnn/h_pre", "rnn/w", "rnn/s_grad"},
@@ -301,7 +299,7 @@ protected:
    inlink.external = "x";
    inlink.internal = "rnn/x";
    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->GetVariable("step_scopes")
+        scope_->FindVar("step_scopes")
            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
    rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10);
  }
@@ -315,7 +313,7 @@ protected:
    std::vector<rnn::MemoryAttr> memories;
    memories.push_back(mem_attr);
    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->GetVariable("step_scopes")
+        scope_->FindVar("step_scopes")
            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
    for (int i = 1; i < 10; ++i) {
      rnn::LinkMemories(*step_scopes, memories, i, -1);
@@ -344,8 +342,8 @@ TEST(RecurrentOp, LinkMemories) {
  std::vector<std::shared_ptr<Scope>> step_scopes;
  for (int i = 0; i < len; ++i) {
    auto scope = std::make_shared<Scope>();
-    scope->CreateVariable("pre_h");
-    auto tensor = scope->CreateVariable("h")->GetMutable<Tensor>();
+    scope->NewVar("pre_h");
+    auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
    float* data = tensor->mutable_data<float>(make_ddim({15, 20}), CPUPlace());
    for (int i = 0; i < 15 * 20; ++i) {
      data[i] = rand() * (1. / (double)RAND_MAX);
@@ -367,9 +365,9 @@ TEST(RecurrentOp, LinkMemories) {
  // check
  for (int i = 0; i < len - 1; ++i) {
    const float* a =
-        step_scopes[i]->GetVariable("h")->GetMutable<Tensor>()->data<float>();
+        step_scopes[i]->FindVar("h")->GetMutable<Tensor>()->data<float>();
    const float* b = step_scopes[i + 1]
-                         ->GetVariable("pre_h")
+                         ->FindVar("pre_h")
                         ->GetMutable<Tensor>()
                         ->data<float>();
    for (size_t i = 0; i < 15 * 20; ++i) {
@@ -382,14 +380,10 @@ TEST(RecurrentOp, LinkMemories) {
  }
  // check
  for (int i = len - 2; i >= 0; --i) {
-    const float* a = step_scopes[i]
-                         ->GetVariable("pre_h")
-                         ->GetMutable<Tensor>()
-                         ->data<float>();
-    const float* b = step_scopes[i + 1]
-                         ->GetVariable("h")
-                         ->GetMutable<Tensor>()
-                         ->data<float>();
+    const float* a =
+        step_scopes[i]->FindVar("pre_h")->GetMutable<Tensor>()->data<float>();
+    const float* b =
+        step_scopes[i + 1]->FindVar("h")->GetMutable<Tensor>()->data<float>();
    for (size_t i = 0; i < 15 * 20; ++i) {
      ASSERT_FLOAT_EQ(a[i], b[i]);
    }
......
@@ -127,6 +127,11 @@ inline typename std::enable_if<sizeof...(Args) != 0, void>::type throw_on_error(
#endif  // PADDLE_ONLY_CPU

+template <typename T>
+inline void throw_on_error(T* e) {
+  throw_on_error(e != nullptr, "");
+}
+
template <typename T>
inline void throw_on_error(T e) {
  throw_on_error(e, "");
......
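The new `T*` overload above is what lets the `PADDLE_ENFORCE(scope->FindVar(...), ...)` calls in the earlier hunks pass a raw pointer directly and have `nullptr` reported as a failure. The following is a standalone sketch of the same overload pattern, heavily simplified; the real `throw_on_error`/`PADDLE_ENFORCE` machinery has more overloads and formatted messages:

```cpp
#include <stdexcept>
#include <string>

// Boolean form: throw when the condition is false.
inline void throw_on_error(bool stat, const std::string& msg) {
  if (!stat) throw std::runtime_error(msg);
}

// Pointer form: a null pointer counts as an error.
template <typename T>
inline void throw_on_error(T* ptr, const std::string& msg = "") {
  throw_on_error(ptr != nullptr, msg);
}

int main() {
  int value = 42;
  int* found = &value;
  int* missing = nullptr;

  throw_on_error(found, "should not throw");  // non-null pointer: passes
  try {
    throw_on_error(missing, "variable not found");  // null pointer: throws
  } catch (const std::runtime_error&) {
    // expected path for the null pointer
  }
  return 0;
}
```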
@@ -104,13 +104,9 @@ All parameter, weight, gradient are variables in Paddle.
  py::class_<pd::Scope, std::shared_ptr<pd::Scope>>(m, "Scope")
      .def(py::init<const std::shared_ptr<pd::Scope>&>())
-      .def("get_var",
-           &pd::Scope::GetVariable,
-           py::return_value_policy::reference)
-      .def("create_var",
-           &pd::Scope::CreateVariable,
-           py::return_value_policy::reference)
-      .def("get_var_name", &pd::Scope::GetVariableName);
+      .def("get_var", &pd::Scope::FindVar, py::return_value_policy::reference)
+      .def("create_var", &pd::Scope::NewVar, py::return_value_policy::reference)
+      .def("get_var_name", &pd::Scope::FindVarName);
  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
......