Commit e593113a authored by dzhwinter, committed by GitHub

Merge pull request #4725 from dzhwinter/fix/scope

change: NewVar() to Var()
@@ -243,7 +243,7 @@ class SymbolTable {
   // TODO determine whether name is generated by python or C++.
   // Currently assume that a unique name will be generated by C++ if the
   // argument name is left default.
-  VarDesc* NewVar(const string& name="");
+  VarDesc* Var(const string& name="");
   // find a VarDesc by name, if recursive is true, find parent's SymbolTable
   // recursively.
...
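The comment above defers name generation to C++ when the argument is left default. Purely as a hypothetical illustration of that convention (the `VarDesc` struct and the map below are stand-ins, not the real `SymbolTable`), mirroring how `Scope::Var()` later in this patch builds a scope-unique name from its own address and the current variable count:

```cpp
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

struct VarDesc { std::string name; };  // stand-in for the real VarDesc

class SymbolTable {
 public:
  VarDesc* Var(const std::string& name = "") {
    std::string key = name;
    if (key.empty()) {  // default argument: generate a unique name in C++
      char buf[64];
      std::snprintf(buf, sizeof(buf), "%p.%zu", static_cast<void*>(this),
                    vars_.size());
      key = buf;
    }
    auto& slot = vars_[key];
    if (!slot) slot.reset(new VarDesc{key});  // create only on first use
    return slot.get();
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<VarDesc>> vars_;
};
```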
@@ -37,7 +37,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
 ```cpp
 class Scope {
  public:
-  Variable* NewVar(const std::string& name);
+  Variable* Var(const std::string& name);
   const Variable* FindVar(const std::string& name) const;
  private:
@@ -98,7 +98,7 @@ class Scope {
   Variable* FindVar(const std::string& name) const;
   // return if already contains same name variable.
-  Variable* NewVar(const std::string& name);
+  Variable* Var(const std::string& name);
  private:
   std::shared_ptr<Scope> parent_;
@@ -107,7 +107,7 @@ class Scope {
 ```
 ## Only scope can create a variable
-To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `NewVar` can construct `Variable`.
+To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `Var` can construct `Variable`.
 ## When scope destroyed, all variables inside this scope should be destroyed together
@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
 ## Orthogonal interface
-`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `NewVar` will return an `Error` when there is a name conflict locally. Combine `FindVar` and `NewVar`, we can implement `NewVar` easily.
+`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `Var` will return an `Error` when there is a name conflict locally. Combine `FindVar` and `Var`, we can implement `Var` easily.
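To make the two guarantees above concrete — only `Scope` can construct a `Variable`, and variables die together with their scope — here is a minimal, self-contained sketch (not Paddle's actual classes) that also builds `Var` on top of `FindVar`, following the reuse-if-present behaviour that `Scope::Var` adopts later in this patch:

```cpp
#include <memory>
#include <string>
#include <unordered_map>

class Scope;

class Variable {
 private:
  Variable() = default;   // only Scope may construct a Variable
  friend class Scope;
};

class Scope {
 public:
  // nullptr when `name` is absent, so FindVar doubles as a Contains check.
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : it->second.get();
  }

  // Create the variable only if it does not exist yet; reuse it otherwise.
  Variable* Var(const std::string& name) {
    if (Variable* existing = FindVar(name)) return existing;
    auto* v = new Variable();          // allowed here because Scope is a friend
    vars_[name].reset(v);
    return v;
  }

 private:
  // unique_ptr ownership: destroying the Scope destroys every Variable in it.
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
};
```

Because `vars_` owns each `Variable` through `std::unique_ptr`, destroying the scope destroys every variable it created, which is exactly the lifetime rule stated above.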
@@ -161,7 +161,7 @@ class TensorArray:
     @name: str
         the name of the variable to output.
     '''
-    tensor = NewVar(name)
+    tensor = Var(name)
     tensor_array_stack(self.name, tensor)
     return tensor
...
@@ -280,12 +280,21 @@ static void CreateGradVarInBlock(
   auto ops = block_desc->AllOps();
   for (size_t op_index = grad_op_start_index; op_index < ops.size();
        ++op_index) {
+    // <<<<<<< HEAD
+    //     for (const auto& output : ops[op_index]->Outputs()) {
+    //       for (const auto& real_output : output.second) {
+    //         if (!block_desc->HasVar(real_output)) {
+    //           block_desc->Var(real_output);
+    //         }
+    //       }
+    //     }
+    // =======
     ForEachVarName(ops[op_index]->Outputs(),
                    [&](const std::string& grad_var_name) {
                      if (block_desc->HasVar(grad_var_name)) {
                        return false;
                      }
-                     block_desc->NewVar(grad_var_name);
+                     block_desc->Var(grad_var_name);
                      auto it = param_name_map.find(grad_var_name);
                      if (it == param_name_map.end()) {
                        return false;
@@ -297,6 +306,7 @@ static void CreateGradVarInBlock(
                      grad_record.op_idx_ = static_cast<int>(op_index);
                      return false; /* not break */
                    });
+    // >>>>>>> origin/develop
   }
 }
@@ -448,7 +458,7 @@ AppendBackward(ProgramDescBind& program_desc, const VarDescBind& target,
   for (auto& ptr : backward_op_descs) {
     all_ops.push_back(std::move(ptr));
   }
-  root_block->NewVar(fill_one_op_out);
+  root_block->Var(fill_one_op_out);
   // create grad_var for all blocks in this program
   CreateGradVarInBlock(&retv, root_block, forward_op_num, grad_to_var);
...
@@ -18,19 +18,22 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
-VarDescBind *BlockDescBind::NewVar(const std::string &name) {
+VarDescBind *BlockDescBind::Var(const std::string &name) {
   need_update_ = true;
   auto it = vars_.find(name);
-  PADDLE_ENFORCE(it == vars_.end(), "Duplicated variable %s", name);
-  auto var = new VarDescBind(name);
+  if (it != vars_.end()) {
+    return it->second.get();
+  }
+  auto *var = new VarDescBind(name);
   vars_[name].reset(var);
   return var;
 }
-VarDescBind *BlockDescBind::Var(const std::string &name) const {
+VarDescBind *BlockDescBind::FindVar(const std::string &name) const {
   auto it = vars_.find(name);
-  PADDLE_ENFORCE(it != vars_.end(),
-                 "Can not find variable %s in current block.", name);
+  if (it == vars_.end()) {
+    return nullptr;
+  }
   return it->second.get();
 }
...
@@ -40,9 +40,9 @@ class BlockDescBind {
   int32_t Parent() const { return desc_->parent_idx(); }
-  VarDescBind *NewVar(const std::string &name_bytes);
-  VarDescBind *Var(const std::string &name_bytes) const;
+  VarDescBind *Var(const std::string &name_bytes);
+  VarDescBind *FindVar(const std::string &name_bytes) const;
   bool HasVar(const std::string &var_name) const;
...
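Besides the rename, `BlockDescBind::Var` changes behaviour: it now returns the existing `VarDescBind` instead of enforcing uniqueness, and the lookup (`FindVar`) returns `nullptr` instead of raising. A small usage sketch, assuming the `paddle/framework/block_desc.h` header from this tree and a block obtained elsewhere (e.g. from a program desc):

```cpp
#include "paddle/framework/block_desc.h"

// Sketch only: `block` is assumed to be a valid block of some program desc.
void BlockDescSketch(paddle::framework::BlockDescBind *block) {
  auto *x = block->Var("x");         // creates the VarDesc on first use
  auto *again = block->Var("x");     // second call is a no-op, returns the same object
  // x == again
  if (block->FindVar("y") == nullptr) {  // lookup no longer throws on a miss
    block->Var("y");
  }
  (void)x;
  (void)again;
}
```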
@@ -66,7 +66,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
   // Instantiate all the vars in the global scope
   for (auto& var : block.vars()) {
-    scope->NewVar(var.name());
+    scope->Var(var.name());
   }
   Scope& local_scope = scope->NewScope();
@@ -78,7 +78,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
   for (auto& var : block.ops(i).outputs()) {
     for (auto& argu : var.arguments()) {
       if (local_scope.FindVar(argu) == nullptr) {
-        local_scope.NewVar(argu);
+        local_scope.Var(argu);
       }
     }
   }
...
@@ -46,10 +46,16 @@ void AddOp(const std::string& type, const VariableNameMap& inputs,
   // insert output
   for (auto kv : outputs) {
     for (auto v : kv.second) {
+      // <<<<<<< HEAD
+      //       auto var = block->Var(v);
+      //       var->SetType(VarDesc::LOD_TENSOR);
+      //       var->SetDataType(paddle::framework::DataType::FP32);
+      // =======
       if (!block->HasVar(v)) {
-        auto var = block->NewVar(v);
+        auto var = block->Var(v);
         var->SetDataType(paddle::framework::DataType::FP32);
       }
+      // >>>>>>> origin/develop
     }
   }
...
@@ -403,11 +403,11 @@ class CompileTimeInferShapeContext : public InferShapeContext {
  private:
   DDim GetDim(const std::string& name) const override {
-    return framework::make_ddim(block_.Var(name)->Shape());
+    return framework::make_ddim(block_.FindVar(name)->Shape());
   }
   void SetDim(const std::string& name, const DDim& dim) override {
-    block_.Var(name)->SetShape(framework::vectorize(dim));
+    block_.FindVar(name)->SetShape(framework::vectorize(dim));
   }
   const OpDescBind& op_;
...
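Note that `block_.FindVar(name)` now yields `nullptr` for an unknown name rather than enforcing, so these accessors implicitly assume the variable already exists. A defensive variant — purely a sketch, not part of this patch — would check before dereferencing:

```cpp
  DDim GetDim(const std::string& name) const override {
    auto* var = block_.FindVar(name);  // may be nullptr after this change
    PADDLE_ENFORCE(var != nullptr, "Variable %s is not found in the block", name);
    return framework::make_ddim(var->Shape());
  }
```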
@@ -84,7 +84,7 @@ TEST(OperatorBase, all) {
   paddle::framework::Scope scope;
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
-  scope.NewVar("OUT1");
+  scope.Var("OUT1");
   ASSERT_EQ(paddle::framework::op_run_num, 0);
   op->Run(scope, device_context);
   ASSERT_EQ(paddle::framework::op_run_num, 1);
@@ -237,12 +237,12 @@ TEST(OpKernel, multi_inputs) {
   paddle::platform::CPUDeviceContext cpu_device_context;
   paddle::framework::Scope scope;
-  scope.NewVar("x0")->GetMutable<Tensor>();
-  scope.NewVar("x1")->GetMutable<Tensor>();
-  scope.NewVar("x2")->GetMutable<Tensor>();
-  scope.NewVar("k0")->GetMutable<Tensor>();
-  scope.NewVar("y0")->GetMutable<Tensor>();
-  scope.NewVar("y1")->GetMutable<Tensor>();
+  scope.Var("x0")->GetMutable<Tensor>();
+  scope.Var("x1")->GetMutable<Tensor>();
+  scope.Var("x2")->GetMutable<Tensor>();
+  scope.Var("k0")->GetMutable<Tensor>();
+  scope.Var("y0")->GetMutable<Tensor>();
+  scope.Var("y1")->GetMutable<Tensor>();
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   op->Run(scope, cpu_device_context);
...
@@ -31,7 +31,7 @@ Scope& Scope::NewScope() const {
   return *kids_.back();
 }
-Variable* Scope::NewVar(const std::string& name) {
+Variable* Scope::Var(const std::string& name) {
   auto iter = vars_.find(name);
   if (iter != vars_.end()) {
     return iter->second;
@@ -42,8 +42,8 @@ Variable* Scope::Var(const std::string& name) {
   return v;
 }
-Variable* Scope::NewVar() {
-  return NewVar(string::Sprintf("%p.%d", this, vars_.size()));
+Variable* Scope::Var() {
+  return Var(string::Sprintf("%p.%d", this, vars_.size()));
 }
 Variable* Scope::FindVar(const std::string& name) const {
@@ -71,8 +71,8 @@ framework::Scope& GetGlobalScope() {
   static std::unique_ptr<framework::Scope> g_scope{nullptr};
   std::call_once(feed_variable_flag, [&]() {
     g_scope.reset(new framework::Scope());
-    g_scope->NewVar("feed_value");
-    g_scope->NewVar("fetch_value");
+    g_scope->Var("feed_value");
+    g_scope->Var("fetch_value");
   });
   return *(g_scope.get());
 }
...
@@ -45,10 +45,10 @@ class Scope {
   Scope& NewScope() const;
   /// Create a variable with given name if it doesn't exist.
-  Variable* NewVar(const std::string& name);
+  Variable* Var(const std::string& name);
   /// Create a variable with a scope-unique name.
-  Variable* NewVar();
+  Variable* Var();
   /// Find a variable in the scope or any of its ancestors. Returns
   /// nullptr if cannot find.
...
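A short usage sketch of the renamed `Scope` interface (assuming the `paddle/framework/scope.h` header from this tree): `Var(name)` creates only on first use, `Var()` invents a scope-unique name, and `FindVar` also searches ancestor scopes:

```cpp
#include "paddle/framework/scope.h"

void ScopeSketch() {
  paddle::framework::Scope scope;
  auto* w = scope.Var("w");        // created on the first call
  auto* again = scope.Var("w");    // same Variable* on the second call
  auto* anon = scope.Var();        // unnamed: gets a scope-unique name
  paddle::framework::Scope& child = scope.NewScope();
  // FindVar searches ancestors, so the child can see the parent's "w",
  // while the parent never sees variables created only in the child.
  bool visible = (child.FindVar("w") != nullptr);
  (void)w; (void)again; (void)anon; (void)visible;
}
```

This is the behaviour the shadowing and `FindVar` tests below exercise.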
@@ -23,8 +23,8 @@ TEST(Scope, VarsShadowing) {
   Scope& ss1 = s.NewScope();
   Scope& ss2 = s.NewScope();
-  Variable* v0 = s.NewVar("a");
-  Variable* v1 = ss1.NewVar("a");
+  Variable* v0 = s.Var("a");
+  Variable* v1 = ss1.Var("a");
   EXPECT_NE(v0, v1);
@@ -40,7 +40,7 @@ TEST(Scope, FindVar) {
   EXPECT_EQ(nullptr, s.FindVar("a"));
   EXPECT_EQ(nullptr, ss.FindVar("a"));
-  ss.NewVar("a");
+  ss.Var("a");
   EXPECT_EQ(nullptr, s.FindVar("a"));
   EXPECT_NE(nullptr, ss.FindVar("a"));
@@ -49,7 +49,7 @@ TEST(Scope, FindScope) {
   Scope s;
   Scope& ss = s.NewScope();
-  Variable* v = s.NewVar("a");
+  Variable* v = s.Var("a");
   EXPECT_EQ(&s, s.FindScope(v));
   EXPECT_EQ(&s, ss.FindScope(v));
...
@@ -134,7 +134,7 @@ void CondOp::PrepareDataForSubnet(
   for (int i = 0; i < BRANCH_NUM; ++i) {
     for (auto& output : (*sub_net_op_[i]).Outputs()) {
       for (auto& var_name : output.second) {
-        sub_scopes[i]->NewVar(var_name);
+        sub_scopes[i]->Var(var_name);
       }
     }
   }
...
@@ -30,7 +30,7 @@ namespace detail {
 inline void CreateVariables(Scope& scope,
                             const std::vector<std::string>& var_names) {
   for (const auto& name : var_names) {
-    scope.NewVar(name);
+    scope.Var(name);
   }
 }
@@ -136,7 +136,7 @@ void DynamicRecurrentOp::WriteStepInputs() const {
   auto& step_scope = cache_.GetScope(step);
   Variable* var = step_scope.FindVar(item.first);
   if (var == nullptr) {
-    var = step_scope.NewVar(item.first);
+    var = step_scope.Var(item.first);
   }
   var->GetMutable<LoDTensor>()->ShareDataWith<value_type>(tensor);
 }
...
@@ -36,7 +36,7 @@ void OpDescNewVar(const std::string& param_name,
 // create a LoD tensor in scope with specific dims
 LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims,
                      const platform::Place& place) {
-  auto* var = scope.NewVar(name);
+  auto* var = scope.Var(name);
   auto* tensor = var->GetMutable<LoDTensor>();
   tensor->Resize(dims);
   tensor->mutable_data<float>(place);
@@ -85,7 +85,7 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test {
   void CreateGlobalVariables() {
     platform::CPUPlace place;
-    scope.NewVar("step_scopes");
+    scope.Var("step_scopes");
     CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place);
     CreateVar(scope, "out0", framework::make_ddim({10, 20}), place);
     auto* in0 = CreateVar(scope, "in0", framework::make_ddim({10, 8}), place);
...
@@ -70,14 +70,14 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
       // the weight are located in parent scope
       for (auto& var_name : input.second) {
         if (!step_scope.FindVar(var_name)) {
-          step_scope.NewVar(var_name)->GetMutable<LoDTensor>();
+          step_scope.Var(var_name)->GetMutable<LoDTensor>();
         }
       }
     }
     // create stepnet's outputs
     for (const auto& output : (*stepnet_)->Outputs()) {
       for (auto& var_name : output.second) {
-        step_scope.NewVar(var_name);
+        step_scope.Var(var_name);
       }
     }
     step_scopes->emplace_back(&step_scope);
@@ -87,7 +87,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
 void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
-    auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<LoDTensor>();
+    auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable<LoDTensor>();
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                    "memory [%s]'s boot variable [%s] not exists", attr.var,
                    attr.boot_var);
@@ -167,9 +167,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
                    "memory variable [%s] does not exists", attr.var);
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                    "boot variable [%s] does not exists", attr.boot_var);
-    auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable<LoDTensor>();
+    auto* mem_grad = step_scope->Var(attr.var)->GetMutable<LoDTensor>();
     auto* boot_mem_grad =
-        step_scope->NewVar(attr.boot_var)->GetMutable<LoDTensor>();
+        step_scope->Var(attr.boot_var)->GetMutable<LoDTensor>();
     boot_mem_grad->Resize(mem_grad->dims());
     boot_mem_grad->ShareDataWith<float>(*mem_grad);
   }
...
@@ -40,7 +40,7 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
-          step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
+          step_scopes[j]->Var(inlinks[i])->GetMutable<Tensor>();
       // The input of operators of each step is Tensor here.
       // Maybe need to modify Slice function.
       *step_input = input->Slice<float>(j, j + 1);
...
@@ -145,16 +145,16 @@ void BindBlockDesc(py::module &m) {
            py::return_value_policy::reference)
       .def("prepend_op", &BlockDescBind::PrependOp,
            py::return_value_policy::reference)
-      .def("new_var",
+      .def("var",
            [](BlockDescBind &self, py::bytes byte_name) {
              std::string name = byte_name;
-             return self.NewVar(name);
+             return self.Var(name);
            },
            py::return_value_policy::reference)
-      .def("var",
+      .def("find_var",
            [](BlockDescBind &self, py::bytes byte_name) {
              std::string name = byte_name;
-             return self.Var(name);
+             return self.FindVar(name);
            },
            py::return_value_policy::reference)
       .def("all_vars", &BlockDescBind::AllVars,
...
@@ -164,9 +164,9 @@ All parameter, weight, gradient are variables in Paddle.
       py::return_value_policy::reference);
   py::class_<Scope>(m, "Scope", "")
-      .def("new_var",
+      .def("var",
            [](Scope &self, const std::string &name) -> Variable * {
-             return self.NewVar(name);
+             return self.Var(name);
            },
            py::return_value_policy::reference)
       .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
...
@@ -5,7 +5,7 @@ Default scope function.
 thread-local stack of Scope. Top of that stack is current scope, the bottom
 of that stack is all scopes' parent.
-Invoking `new_var/find_var` can `new/find` variable in current scope.
+Invoking `var/find_var` can `new/find` variable in current scope.
 Invoking `enter_local_scope/leave_local_scope` can create or destroy local
 scope.
@@ -19,7 +19,7 @@ import threading
 __tl_scope__ = threading.local()
 __all__ = [
-    'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var',
+    'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var',
     'find_var', 'scoped_function'
 ]
@@ -54,11 +54,11 @@ def leave_local_scope():
     get_cur_scope().drop_kids()
-def new_var(name):
+def var(name):
     """
     create variable in current scope.
    """
-    return get_cur_scope().new_var(name)
+    return get_cur_scope().var(name)
 def find_var(name):
...
@@ -20,11 +20,11 @@ class Variable(object):
         if name is None:
             name = Variable._unique_var_name_()
-        try:
+        is_new_var = False
+        self.desc = self.block.desc.find_var(name)
+        if self.desc is None:
             self.desc = self.block.desc.var(name)
-            is_new_var = False
-        except core.EnforceNotMet:
-            self.desc = self.block.desc.new_var(name)
             is_new_var = True
         if is_new_var:
...
@@ -14,7 +14,7 @@ def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()
     def __create_var__(name, var_name):
-        scope.new_var(var_name)
+        scope.var(var_name)
         kwargs[name].append(var_name)
     for in_name, in_dup in Operator.get_op_inputs(op_type):
@@ -71,7 +71,7 @@ def set_input(scope, op, inputs, place):
 def set_output_grad(scope, op, outputs, place):
     def __set_tensor__(name):
         out_tensor = scope.find_var(name).get_tensor()
-        grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
+        grad_tensor = scope.var(grad_var_name(name)).get_tensor()
         out_dtype = out_tensor.dtype()
         if out_dtype == core.DataType.FP64:
             data = np.ones(out_tensor.shape(), dtype=np.float64)
@@ -169,10 +169,10 @@ def get_numeric_gradient(scope,
 def get_backward_op(scope, op, no_grad_set):
     backward_op = core.Operator.backward(op, no_grad_set)
     for input in backward_op.input_vars():
-        var = scope.new_var(input)
+        var = scope.var(input)
         var.get_tensor()
     for output in backward_op.output_vars():
-        var = scope.new_var(output)
+        var = scope.var(output)
         var.get_tensor()
     return backward_op
...
@@ -39,7 +39,7 @@ class PySimpleCondTest(unittest.TestCase):
 def create_tensor(scope, name, shape, np_data):
-    tensor = scope.new_var(name).get_tensor()
+    tensor = scope.var(name).get_tensor()
     tensor.set_dims(shape)
     tensor.set(np_data, core.CPUPlace())
     return tensor
@@ -74,9 +74,9 @@ class TestCondOp(unittest.TestCase):
         create_tensor(self.scope, "X", [10, 1], x_np_data)
         cond_np_data = self.py_cond.cond.astype("int32")
         create_tensor(self.scope, "cond", [10, 1], cond_np_data)
-        self.scope.new_var("SubScopes")
-        self.scope.new_var("IndexTensors")
-        self.scope.new_var("Out")
+        self.scope.var("SubScopes")
+        self.scope.var("IndexTensors")
+        self.scope.var("Out")
     def create_cond_op(self):
         self.condop = CondOp(
...
@@ -10,7 +10,7 @@ class TestDefaultScopeFuncs(unittest.TestCase):
         self.assertIsNone(find_var("test"))
     def test_create_var_get_var(self):
-        var_a = new_var("var_a")
+        var_a = var("var_a")
         self.assertIsNotNone(var_a)
         self.assertIsNotNone(get_cur_scope().find_var('var_a'))
         enter_local_scope()
@@ -19,7 +19,7 @@ class TestDefaultScopeFuncs(unittest.TestCase):
     def test_var_get_int(self):
         def __new_scope__():
-            i = new_var("var_i")
+            i = var("var_i")
             self.assertFalse(i.is_int())
             i.set_int(10)
             self.assertTrue(i.is_int())
...
@@ -6,7 +6,7 @@ import numpy as np
 def create_tensor(scope, name, shape, np_data):
-    tensor = scope.new_var(name).get_tensor()
+    tensor = scope.var(name).get_tensor()
     tensor.set_dims(shape)
     tensor.set(np_data, core.CPUPlace())
     return tensor
@@ -72,8 +72,8 @@ class DynamicRecurrentOpTest(unittest.TestCase):
         create_tensor(self.scope, "U", [self.input_dim, self.input_dim], U)
         create_tensor(self.scope, "h_boot", [self.num_sents, self.input_dim],
                       h_boot)
-        self.scope.new_var("step_scopes")
-        self.scope.new_var("h@mem")
+        self.scope.var("step_scopes")
+        self.scope.var("h@mem")
     def create_rnn_op(self):
         # create RNNOp
...
@@ -14,7 +14,7 @@ class TestGaussianRandomOp(unittest.TestCase):
     def gaussian_random_test(self, place):
         scope = core.Scope()
-        scope.new_var('Out').get_tensor()
+        scope.var('Out').get_tensor()
         op = Operator(
             "gaussian_random",
...
@@ -13,14 +13,14 @@ class TestInferShape(unittest.TestCase):
         shape = [10, 20]
         # prepare input/output
-        x1 = block.new_var("x1")
+        x1 = block.var("x1")
         x1.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x1.set_shape(shape)
-        x2 = block.new_var("x2")
+        x2 = block.var("x2")
         x2.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x2.set_shape(shape)
-        out = block.new_var("out")
+        out = block.var("out")
         out.set_type(core.VarDesc.VarType.LOD_TENSOR)
         # prepare the operator
@@ -42,14 +42,14 @@ class TestInferShape(unittest.TestCase):
         y_shape = [20, 30]
         # prepare input/output
-        x1 = block.new_var("x")
+        x1 = block.var("x")
         x1.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x1.set_shape(x_shape)
-        x2 = block.new_var("y")
+        x2 = block.var("y")
         x2.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x2.set_shape(y_shape)
-        out = block.new_var("out")
+        out = block.var("out")
         out.set_type(core.VarDesc.VarType.LOD_TENSOR)
         # prepare the operator
...
@@ -31,7 +31,7 @@ uniq_id = atomic_id().next
 def data_layer(name, dims):
-    var = scope.new_var(name)
+    var = scope.var(name)
     tensor = var.get_tensor()
     tensor.set_dims(dims)  # 1 is batch size holder.
     return name
@@ -67,7 +67,7 @@ def sgd_optimizer(net, param_name, learning_rate=0.005):
 # should use operator and add these to the init_network
 def init_param(net, param_name, dims):
-    scope.new_var(param_name)
+    scope.var(param_name)
     op = Operator(
         "uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10)
     op.infer_shape(scope)
@@ -104,7 +104,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)
     pre_activation = name + ".mul.out"
-    scope.new_var(pre_activation)
+    scope.var(pre_activation)
     mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation)
     net.append_op(mul_op)
@@ -115,7 +115,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
         sgd_optimizer(
             net=optimize_net, param_name=bias_name, learning_rate=0.001)
         bias_out = name + ".rowwise_add.out"
-        scope.new_var(bias_out)
+        scope.var(bias_out)
         rowwise_append_op = Operator(
             "rowwise_add", X=pre_activation, b=bias_name, Out=bias_out)
         net.append_op(rowwise_append_op)
@@ -123,7 +123,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     activation_op = Operator(act, X=pre_activation, Y=name)
     net.append_op(activation_op)
-    scope.new_var(name)
+    scope.var(name)
     net.infer_shape(scope)
     return name
@@ -133,7 +133,7 @@ def cross_entropy_layer(net, input, label):
     cross_entropy_op = Operator(
         "cross_entropy", X=input, Label=label, Y=cost_name)
     net.append_op(cross_entropy_op)
-    scope.new_var(cost_name)
+    scope.var(cost_name)
     net.infer_shape(scope)
     return cost_name
@@ -141,10 +141,10 @@ def cross_entropy_layer(net, input, label):
 def create_backward_net(forward_net):
     net = core.Operator.backward(forward_net, set())
     for input in net.inputs()["all"]:
-        var = scope.new_var(input)
+        var = scope.var(input)
         var.get_tensor()
     for output in net.outputs()["all"]:
-        var = scope.new_var(output)
+        var = scope.var(output)
         var.get_tensor()
     return net
...
@@ -51,7 +51,7 @@ class TestProgram(unittest.TestCase):
         sum_op_desc.set_input("Y", ["b1"])
         sum_op_desc.set_output("Out", ["out2"])
-        target = block.new_var("out2")
+        target = block.var("out2")
         expect_ops = [
             "mul", "elementwise_add", "fill_constant", "elementwise_add_grad",
...
@@ -93,7 +93,7 @@ class TestVarDesc(unittest.TestCase):
     def test_shape(self):
         program_desc = core.ProgramDesc.__create_program_desc__()
         block = program_desc.block(0)
-        var = block.new_var('my_var')
+        var = block.var('my_var')
         var.set_type(core.VarDesc.VarType.SELECTED_ROWS)
         src_shape = [3, 2, 10, 8]
         var.set_shape(src_shape)
@@ -104,7 +104,7 @@ class TestVarDesc(unittest.TestCase):
     def test_data_type(self):
         program_desc = core.ProgramDesc.__create_program_desc__()
         block = program_desc.block(0)
-        var = block.new_var('my_var')
+        var = block.var('my_var')
         var.set_type(core.VarDesc.VarType.LOD_TENSOR)
         var.set_data_type(core.DataType.INT32)
         self.assertEqual(core.DataType.INT32, var.data_type())
@@ -117,12 +117,12 @@ class TestBlockDesc(unittest.TestCase):
         self.assertIsNotNone(prog)
         block = prog.block(0)
         self.assertIsNotNone(block)
-        var1 = block.new_var("var1")
-        var2 = block.new_var("var2")
-        var3 = block.new_var("var3")
+        var1 = block.var("var1")
+        var2 = block.var("var2")
+        var3 = block.var("var3")
         all_vars = block.all_vars()
         self.assertEqual(set(all_vars), set([var1, var2, var3]))
-        var2_re = block.var("var2")
+        var2_re = block.find_var("var2")
         self.assertEqual(var2_re, var2)
     def test_add_op(self):
...
@@ -66,7 +66,7 @@ class PySimpleRNNTest(unittest.TestCase):
 def create_tensor(scope, name, shape, np_data):
-    tensor = scope.new_var(name).get_tensor()
+    tensor = scope.var(name).get_tensor()
     tensor.set_dims(shape)
     tensor.set(np_data, core.CPUPlace())
     return tensor
@@ -125,8 +125,8 @@ class RecurrentOpTest(unittest.TestCase):
         h_boot_np_data = self.py_rnn.h_boot
         create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
                       h_boot_np_data)
-        self.scope.new_var("step_scopes")
-        self.scope.new_var("h@mem")
+        self.scope.var("step_scopes")
+        self.scope.var("h@mem")
     def create_rnn_op(self):
         # create RNNOp
...
@@ -18,7 +18,7 @@ class TestScope(unittest.TestCase):
     def test_create_var_get_var(self):
         paddle_c = paddle.v2.framework.core
         scope = paddle_c.Scope()
-        var_a = scope.new_var("var_a")
+        var_a = scope.var("var_a")
         self.assertIsNotNone(var_a)
         self.assertIsNotNone(scope.find_var('var_a'))
         scope2 = scope.new_scope()
@@ -27,7 +27,7 @@ class TestScope(unittest.TestCase):
     def test_var_get_int(self):
         paddle_c = paddle.v2.framework.core
         scope = paddle_c.Scope()
-        var = scope.new_var("test_int")
+        var = scope.var("test_int")
         var.set_int(10)
         self.assertTrue(var.is_int())
         self.assertEqual(10, var.get_int())
...
@@ -6,7 +6,7 @@ import numpy
 class TestTensor(unittest.TestCase):
     def test_int_tensor(self):
         scope = core.Scope()
-        var = scope.new_var("test_tensor")
+        var = scope.var("test_tensor")
         place = core.CPUPlace()
         tensor = var.get_tensor()
@@ -25,7 +25,7 @@ class TestTensor(unittest.TestCase):
     def test_float_tensor(self):
         scope = core.Scope()
-        var = scope.new_var("test_tensor")
+        var = scope.var("test_tensor")
         place = core.CPUPlace()
         tensor = var.get_tensor()
@@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase):
     def test_int_lod_tensor(self):
         place = core.CPUPlace()
         scope = core.Scope()
-        var_lod = scope.new_var("test_lod_tensor")
+        var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
         lod_tensor.set_dims([4, 4, 6])
@@ -68,7 +68,7 @@ class TestTensor(unittest.TestCase):
     def test_float_lod_tensor(self):
         place = core.CPUPlace()
         scope = core.Scope()
-        var_lod = scope.new_var("test_lod_tensor")
+        var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
         lod_tensor.set_dims([5, 2, 3, 4])
...
@@ -13,7 +13,7 @@ class TestTensorArray(unittest.TestCase):
         # create a LoDTensor
         self.scope = core.Scope()
-        var = self.scope.new_var("test_tensor")
+        var = self.scope.var("test_tensor")
         self.place = core.CPUPlace()
         tensor = var.get_tensor()
         tensor.set_dims([self.batch_size, self.dim])
@@ -51,7 +51,7 @@ class TestTensorArray(unittest.TestCase):
         self.ta.unstack(self.tensor)
         # create a tensor with shape of [1, self.dim]
-        var = self.scope.new_var("hell")
+        var = self.scope.var("hell")
         tensor = var.get_tensor()
         tensor.set_dims([1, self.dim])
         tensor.alloc_float(self.place)
@@ -71,7 +71,7 @@ class TestTensorArray(unittest.TestCase):
         self.ta.unstack(self.tensor)
         # create a tensor with shape of [1, self.dim]
-        var = self.scope.new_var("hell")
+        var = self.scope.var("hell")
         tensor = var.get_tensor()
         tensor.set_dims([1, self.dim])
         tensor.alloc_float(self.place)
...
@@ -14,7 +14,7 @@ class TestUniformRandomOp(unittest.TestCase):
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.new_var('X').get_tensor()
+        scope.var('X').get_tensor()
         op = Operator(
             "uniform_random",
...