diff --git a/doc/design/block.md b/doc/design/block.md index 9c812732d6ead76eb3aa2d1b617449c96807f21a..7cbf0d55b1faeb2093ee7cf234d1c2ad1905885b 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -243,7 +243,7 @@ class SymbolTable { // TODO determine whether name is generated by python or C++. // Currently assume that a unique name will be generated by C++ if the // argument name is left default. - VarDesc* NewVar(const string& name=""); + VarDesc* Var(const string& name=""); // find a VarDesc by name, if recursive is true, find parent's SymbolTable // recursively. diff --git a/doc/design/scope.md b/doc/design/scope.md index b1f9bb4378eb5ec6926f1e53f7c1f4fd5674064c..4da76eebb74abcd26ec2b8671399e6bc4fb58574 100644 --- a/doc/design/scope.md +++ b/doc/design/scope.md @@ -37,7 +37,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`. ```cpp class Scope { public: - Variable* NewVar(const std::string& name); + Variable* Var(const std::string& name); const Variable* FindVar(const std::string& name) const; private: @@ -98,7 +98,7 @@ class Scope { Variable* FindVar(const std::string& name) const; // return if already contains same name variable. - Variable* NewVar(const std::string& name); + Variable* Var(const std::string& name); private: std::shared_ptr<Scope> parent_; @@ -107,7 +107,7 @@ class Scope { ``` ## Only scope can create a variable -To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `NewVar` can construct `Variable`. +To ensure that only a scope can create a variable, we mark `Variable`'s constructor as a private member function and make `Scope` a friend class of `Variable`. Then only `Var` can construct a `Variable`. ## When scope destroyed, all variables inside this scope should be destroyed together @@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shared pointer. ## Orthogonal interface -`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `NewVar` will return an `Error` when there is a name conflict locally. Combine `FindVar` and `NewVar`, we can implement `NewVar` easily. +`FindVar` returns `nullptr` when `name` is not found, so it can double as a `Contains` method. `Var` returns the existing variable instead of raising an error when the name already exists locally, so repeated calls are safe. Combining `FindVar` and `Var` makes create-if-missing logic easy to implement. diff --git a/doc/design/tensor_array.md b/doc/design/tensor_array.md index 8378e97bf7cfaae54c36b1b92e202b16e4fe1e28..37e4f7b90f94fa3eb015e733999cd84c96b2239c 100644 --- a/doc/design/tensor_array.md +++ b/doc/design/tensor_array.md @@ -161,7 +161,7 @@ class TensorArray: @name: str the name of the variable to output.
''' - tensor = NewVar(name) + tensor = Var(name) tensor_array_stack(self.name, tensor) return tensor diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 719ac7c80a8c57d6a937e05fa0aefa8ba889ecde..ca33a9a50c4137a19e27f510cc91f20e9e9b8449 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -280,12 +280,12 @@ static void CreateGradVarInBlock( auto ops = block_desc->AllOps(); for (size_t op_index = grad_op_start_index; op_index < ops.size(); ++op_index) { ForEachVarName(ops[op_index]->Outputs(), [&](const std::string& grad_var_name) { if (block_desc->HasVar(grad_var_name)) { return false; } - block_desc->NewVar(grad_var_name); + block_desc->Var(grad_var_name); auto it = param_name_map.find(grad_var_name); if (it == param_name_map.end()) { return false; } @@ -448,7 +448,7 @@ AppendBackward(ProgramDescBind& program_desc, const VarDescBind& target, for (auto& ptr : backward_op_descs) { all_ops.push_back(std::move(ptr)); } - root_block->NewVar(fill_one_op_out); + root_block->Var(fill_one_op_out); // create grad_var for all blocks in this program CreateGradVarInBlock(&retv, root_block, forward_op_num, grad_to_var); diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 4c39975ec94f95d3299efe58474d9db43654ec22..47b75228cdbd2a8b4f0c5ad33aa82f5e43044606 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -18,19 +18,22 @@ limitations under the License.
*/ namespace paddle { namespace framework { -VarDescBind *BlockDescBind::NewVar(const std::string &name) { +VarDescBind *BlockDescBind::Var(const std::string &name) { need_update_ = true; auto it = vars_.find(name); - PADDLE_ENFORCE(it == vars_.end(), "Duplicated variable %s", name); - auto var = new VarDescBind(name); + if (it != vars_.end()) { + return it->second.get(); + } + auto *var = new VarDescBind(name); vars_[name].reset(var); return var; } -VarDescBind *BlockDescBind::Var(const std::string &name) const { +VarDescBind *BlockDescBind::FindVar(const std::string &name) const { auto it = vars_.find(name); - PADDLE_ENFORCE(it != vars_.end(), - "Can not find variable %s in current block.", name); + if (it == vars_.end()) { + return nullptr; + } return it->second.get(); } diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 50f88ec2f7f7b5f69c3a56db370b45f628498fe6..9fb88f963283c72e1ec389b72dd2d98049c74f6d 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -40,9 +40,9 @@ class BlockDescBind { int32_t Parent() const { return desc_->parent_idx(); } - VarDescBind *NewVar(const std::string &name_bytes); + VarDescBind *Var(const std::string &name_bytes); - VarDescBind *Var(const std::string &name_bytes) const; + VarDescBind *FindVar(const std::string &name_bytes) const; bool HasVar(const std::string &var_name) const; diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index c388b2198e4fbf75d6584d710e00d3deca93eb51..8e82e28bac478ad93ece3fcec9730c6cbabc392a 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -66,7 +66,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { // Instantiate all the vars in the global scope for (auto& var : block.vars()) { - scope->NewVar(var.name()); + scope->Var(var.name()); } Scope& local_scope = scope->NewScope(); @@ -78,7 +78,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { for (auto& var : block.ops(i).outputs()) { for (auto& argu : var.arguments()) { if (local_scope.FindVar(argu) == nullptr) { - local_scope.NewVar(argu); + local_scope.Var(argu); } } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 85312eaa926fe2daff6d9a01bfbbcfa105d03df5..fcd2e47cff57fcc6c177be77d7e14b167a28f4ae 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -46,10 +46,10 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, // insert output for (auto kv : outputs) { for (auto v : kv.second) { if (!block->HasVar(v)) { - auto var = block->NewVar(v); + auto var = block->Var(v); var->SetDataType(paddle::framework::DataType::FP32); } } } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 97a142d5f1661704fede858b28ff0d5487c66fab..cf15f9933ab3bc881add3d45b7ca17194a70e0f1 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -403,11 +403,11 @@ class CompileTimeInferShapeContext : public InferShapeContext { private: DDim GetDim(const std::string& name) const override { - return framework::make_ddim(block_.Var(name)->Shape()); + return framework::make_ddim(block_.FindVar(name)->Shape()); } void SetDim(const std::string& name, const DDim& dim) override { - block_.Var(name)->SetShape(framework::vectorize(dim));
+ block_.FindVar(name)->SetShape(framework::vectorize(dim)); } const OpDescBind& op_; diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index a02f4668bca2360995cc05206f7f97e027db0907..d7890ac8d0af2171271a0cfccd356563c7604e72 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -84,7 +84,7 @@ TEST(OperatorBase, all) { paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - scope.NewVar("OUT1"); + scope.Var("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); op->Run(scope, device_context); ASSERT_EQ(paddle::framework::op_run_num, 1); @@ -237,12 +237,12 @@ TEST(OpKernel, multi_inputs) { paddle::platform::CPUDeviceContext cpu_device_context; paddle::framework::Scope scope; - scope.NewVar("x0")->GetMutable<Tensor>(); - scope.NewVar("x1")->GetMutable<Tensor>(); - scope.NewVar("x2")->GetMutable<Tensor>(); - scope.NewVar("k0")->GetMutable<Tensor>(); - scope.NewVar("y0")->GetMutable<Tensor>(); - scope.NewVar("y1")->GetMutable<Tensor>(); + scope.Var("x0")->GetMutable<Tensor>(); + scope.Var("x1")->GetMutable<Tensor>(); + scope.Var("x2")->GetMutable<Tensor>(); + scope.Var("k0")->GetMutable<Tensor>(); + scope.Var("y0")->GetMutable<Tensor>(); + scope.Var("y1")->GetMutable<Tensor>(); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_device_context); diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 5821bac928ed898971d61a3e2a86f59155d76991..8f8a53eec8f947b088124a3f034fedb17fd86a48 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -31,7 +31,7 @@ Scope& Scope::NewScope() const { return *kids_.back(); } -Variable* Scope::NewVar(const std::string& name) { +Variable* Scope::Var(const std::string& name) { auto iter = vars_.find(name); if (iter != vars_.end()) { return iter->second; } @@ -42,8 +42,8 @@ return v; } -Variable* Scope::NewVar() { - return NewVar(string::Sprintf("%p.%d", this, vars_.size())); +Variable* Scope::Var() { + return Var(string::Sprintf("%p.%d", this, vars_.size())); } Variable* Scope::FindVar(const std::string& name) const { @@ -71,8 +71,8 @@ framework::Scope& GetGlobalScope() { static std::unique_ptr<framework::Scope> g_scope{nullptr}; std::call_once(feed_variable_flag, [&]() { g_scope.reset(new framework::Scope()); - g_scope->NewVar("feed_value"); - g_scope->NewVar("fetch_value"); + g_scope->Var("feed_value"); + g_scope->Var("fetch_value"); }); return *(g_scope.get()); } diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index a8cfb107c25ccd62039db7349cc1c1dbff772f39..a7fce3514b163d78bf96b3cc19d188744a383395 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -45,10 +45,10 @@ class Scope { Scope& NewScope() const; /// Create a variable with given name if it doesn't exist. - Variable* NewVar(const std::string& name); + Variable* Var(const std::string& name); /// Create a variable with a scope-unique name. - Variable* NewVar(); + Variable* Var(); /// Find a variable in the scope or any of its ancestors. Returns /// nullptr if cannot find.
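With the runtime `Scope` API settled by the scope.h hunk above, the new contract is: `Var` is an idempotent create-or-get, while `FindVar` is a pure lookup that searches ancestors and never creates. The following is a minimal usage sketch of that contract, not part of the patch; it assumes compilation inside the Paddle tree so that `paddle/framework/scope.h` is on the include path:

```cpp
#include <cassert>

#include "paddle/framework/scope.h"

int main() {
  paddle::framework::Scope scope;

  // Var() creates on first use and returns the same variable on every
  // later call with the same name, so repeated calls are safe.
  paddle::framework::Variable* v0 = scope.Var("weight");
  paddle::framework::Variable* v1 = scope.Var("weight");
  assert(v0 == v1);

  // FindVar() never creates; it returns nullptr for unknown names and
  // therefore doubles as a Contains() check.
  assert(scope.FindVar("weight") != nullptr);
  assert(scope.FindVar("missing") == nullptr);

  // A child scope shadows its parent: Var() in the child creates a fresh
  // variable even though the parent already owns one with the same name,
  // matching the VarsShadowing expectation in scope_test.cc below.
  paddle::framework::Scope& child = scope.NewScope();
  assert(child.Var("weight") != v0);

  return 0;
}
```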
diff --git a/paddle/framework/scope_test.cc b/paddle/framework/scope_test.cc index 9d51e355b0f6336d2f875ff2d77266b261baf5ac..7cc5e3510d978fae81d1e36da7ca35d4b3a04098 100644 --- a/paddle/framework/scope_test.cc +++ b/paddle/framework/scope_test.cc @@ -23,8 +23,8 @@ TEST(Scope, VarsShadowing) { Scope& ss1 = s.NewScope(); Scope& ss2 = s.NewScope(); - Variable* v0 = s.NewVar("a"); - Variable* v1 = ss1.NewVar("a"); + Variable* v0 = s.Var("a"); + Variable* v1 = ss1.Var("a"); EXPECT_NE(v0, v1); @@ -40,7 +40,7 @@ TEST(Scope, FindVar) { EXPECT_EQ(nullptr, s.FindVar("a")); EXPECT_EQ(nullptr, ss.FindVar("a")); - ss.NewVar("a"); + ss.Var("a"); EXPECT_EQ(nullptr, s.FindVar("a")); EXPECT_NE(nullptr, ss.FindVar("a")); @@ -49,7 +49,7 @@ TEST(Scope, FindScope) { Scope s; Scope& ss = s.NewScope(); - Variable* v = s.NewVar("a"); + Variable* v = s.Var("a"); EXPECT_EQ(&s, s.FindScope(v)); EXPECT_EQ(&s, ss.FindScope(v)); diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index 2737104a205cbc1e18ce4a3a45592a416d38a874..adcd867f502d166f851926fde602dbb3fed9b48e 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -134,7 +134,7 @@ void CondOp::PrepareDataForSubnet( for (int i = 0; i < BRANCH_NUM; ++i) { for (auto& output : (*sub_net_op_[i]).Outputs()) { for (auto& var_name : output.second) { - sub_scopes[i]->NewVar(var_name); + sub_scopes[i]->Var(var_name); } } } diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc index 58a5bf3e3651c963eead6dc0b8a3497c65b0eff2..03f33e28d49fdaeccb9b6266359e0b41a1cb847f 100644 --- a/paddle/operators/dynamic_recurrent_op.cc +++ b/paddle/operators/dynamic_recurrent_op.cc @@ -30,7 +30,7 @@ namespace detail { inline void CreateVariables(Scope& scope, const std::vector<std::string>& var_names) { for (const auto& name : var_names) { - scope.NewVar(name); + scope.Var(name); } } @@ -136,7 +136,7 @@ void DynamicRecurrentOp::WriteStepInputs() const { auto& step_scope = cache_.GetScope(step); Variable* var = step_scope.FindVar(item.first); if (var == nullptr) { - var = step_scope.NewVar(item.first); + var = step_scope.Var(item.first); } var->GetMutable<LoDTensor>()->ShareDataWith(tensor); } diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc index b849c4541da5d9812f4d86430049c0cbc04f385d..83a5ba36d9af2ef81ebcbb33e056de2e0b98cbc1 100644 --- a/paddle/operators/dynamic_recurrent_op_test.cc +++ b/paddle/operators/dynamic_recurrent_op_test.cc @@ -36,7 +36,7 @@ void OpDescNewVar(const std::string& param_name, // create a LoD tensor in scope with specific dims LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims, const platform::Place& place) { - auto* var = scope.NewVar(name); + auto* var = scope.Var(name); auto* tensor = var->GetMutable<LoDTensor>(); tensor->Resize(dims); tensor->mutable_data<float>(place); @@ -85,7 +85,7 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test { void CreateGlobalVariables() { platform::CPUPlace place; - scope.NewVar("step_scopes"); + scope.Var("step_scopes"); CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place); CreateVar(scope, "out0", framework::make_ddim({10, 20}), place); auto* in0 = CreateVar(scope, "in0", framework::make_ddim({10, 8}), place); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 00647f55f79d54602f8e755dba059dfaacc9f41e..e3d08378c2f29fa5d84c24ae7cebfcb0e7a53b25 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc
@@ -70,14 +70,14 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { - step_scope.NewVar(var_name)->GetMutable<Tensor>(); + step_scope.Var(var_name)->GetMutable<Tensor>(); } } } // create stepnet's outputs for (const auto& output : (*stepnet_)->Outputs()) { for (auto& var_name : output.second) { - step_scope.NewVar(var_name); + step_scope.Var(var_name); } } step_scopes->emplace_back(&step_scope); @@ -87,7 +87,7 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { for (auto& attr : arg_->memories) { - auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<Tensor>(); + auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable<Tensor>(); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); @@ -167,9 +167,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( "memory variable [%s] does not exists", attr.var); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "boot variable [%s] does not exists", attr.boot_var); - auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable<Tensor>(); + auto* mem_grad = step_scope->Var(attr.var)->GetMutable<Tensor>(); auto* boot_mem_grad = - step_scope->NewVar(attr.boot_var)->GetMutable<Tensor>(); + step_scope->Var(attr.boot_var)->GetMutable<Tensor>(); boot_mem_grad->Resize(mem_grad->dims()); boot_mem_grad->ShareDataWith(*mem_grad); } diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index d264664a99e2af88fc2c35f50476ed4722a9eea0..30b8ddeb5bc4220e261a5c37ac195b0348fef936 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -40,7 +40,7 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes, f::DDim step_dims = slice_ddim(dims, 1, dims.size()); for (size_t j = 0; j < seq_len; j++) { Tensor* step_input = - step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>(); + step_scopes[j]->Var(inlinks[i])->GetMutable<Tensor>(); // The input of operators of each step is Tensor here. // Maybe need to modify Slice function. *step_input = input->Slice(j, j + 1); diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 2acfc28b66456c4ecf159bc6a714c939e98ecd24..b6327f8500bbbb66575d3bc928b38ab208296a44 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -145,16 +145,16 @@ void BindBlockDesc(py::module &m) { py::return_value_policy::reference) .def("prepend_op", &BlockDescBind::PrependOp, py::return_value_policy::reference) - .def("new_var", + .def("var", [](BlockDescBind &self, py::bytes byte_name) { std::string name = byte_name; - return self.NewVar(name); + return self.Var(name); }, py::return_value_policy::reference) - .def("var", + .def("find_var", [](BlockDescBind &self, py::bytes byte_name) { std::string name = byte_name; - return self.Var(name); + return self.FindVar(name); }, py::return_value_policy::reference) .def("all_vars", &BlockDescBind::AllVars, diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index cc9f7ffe02781cc13105b19bb987207743febdf6..b143cb9f59d3b734bcbec30c8a69dc85b5a10c97 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -164,9 +164,9 @@ All parameter, weight, gradient are variables in Paddle.
py::return_value_policy::reference); py::class_<Scope>(m, "Scope", "") - .def("new_var", + .def("var", [](Scope &self, const std::string &name) -> Variable * { - return self.NewVar(name); + return self.Var(name); }, py::return_value_policy::reference) .def("find_var", &Scope::FindVar, py::return_value_policy::reference) diff --git a/python/paddle/v2/framework/default_scope_funcs.py b/python/paddle/v2/framework/default_scope_funcs.py index 1b5580c8b30f69016f187b1d8710a57b5f7cfa9f..c07f9a6ab96ac86fd6d20fbe0bc560845107f063 100644 --- a/python/paddle/v2/framework/default_scope_funcs.py +++ b/python/paddle/v2/framework/default_scope_funcs.py @@ -5,7 +5,7 @@ Default scope function. thread-local stack of Scope. Top of that stack is current scope, the bottom of that stack is all scopes' parent. -Invoking `new_var/find_var` can `new/find` variable in current scope. +Invoking `var/find_var` creates or finds a variable in the current scope. Invoking `enter_local_scope/leave_local_scope` can create or destroy local scope. @@ -19,7 +19,7 @@ import threading __tl_scope__ = threading.local() __all__ = [ - 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var', + 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var', 'find_var', 'scoped_function' ] @@ -54,11 +54,11 @@ def leave_local_scope(): get_cur_scope().drop_kids() -def new_var(name): +def var(name): """ create variable in current scope. """ - return get_cur_scope().new_var(name) + return get_cur_scope().var(name) def find_var(name): diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py index 01cd9982dc1c8d9869e59c55d0061abef91919ef..acc61e66da1a55ec0123b195ebb52ce0858cdf5e 100644 --- a/python/paddle/v2/framework/framework.py +++ b/python/paddle/v2/framework/framework.py @@ -20,11 +20,11 @@ class Variable(object): if name is None: name = Variable._unique_var_name_() - try: + is_new_var = False + self.desc = self.block.desc.find_var(name) + + if self.desc is None: self.desc = self.block.desc.var(name) - is_new_var = False - except core.EnforceNotMet: - self.desc = self.block.desc.new_var(name) is_new_var = True if is_new_var: diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 81067f38bbf64ac1ab4ccf02aa43b0a38b7d48ad..215fa0b94e423755b7bc3f05a2b14a8c85451202 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -14,7 +14,7 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() def __create_var__(name, var_name): - scope.new_var(var_name) + scope.var(var_name) kwargs[name].append(var_name) for in_name, in_dup in Operator.get_op_inputs(op_type): @@ -71,7 +71,7 @@ def set_input(scope, op, inputs, place): def set_output_grad(scope, op, outputs, place): def __set_tensor__(name): out_tensor = scope.find_var(name).get_tensor() - grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() + grad_tensor = scope.var(grad_var_name(name)).get_tensor() out_dtype = out_tensor.dtype() if out_dtype == core.DataType.FP64: data = np.ones(out_tensor.shape(), dtype=np.float64) @@ -169,10 +169,10 @@ def get_numeric_gradient(scope, def get_backward_op(scope, op, no_grad_set): backward_op = core.Operator.backward(op, no_grad_set) for input in backward_op.input_vars(): - var = scope.new_var(input) + var = scope.var(input) var.get_tensor() for output in backward_op.output_vars(): - var = scope.new_var(output) + var = scope.var(output) var.get_tensor() return backward_op diff --git 
a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py index 76323b5e10c59822b4de82a70ebd57b3e57c8392..2c7bcc4be46683ed9871b888c9dbabf27887be29 100644 --- a/python/paddle/v2/framework/tests/test_cond_op.py +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -39,7 +39,7 @@ class PySimpleCondTest(unittest.TestCase): def create_tensor(scope, name, shape, np_data): - tensor = scope.new_var(name).get_tensor() + tensor = scope.var(name).get_tensor() tensor.set_dims(shape) tensor.set(np_data, core.CPUPlace()) return tensor @@ -74,9 +74,9 @@ class TestCondOp(unittest.TestCase): create_tensor(self.scope, "X", [10, 1], x_np_data) cond_np_data = self.py_cond.cond.astype("int32") create_tensor(self.scope, "cond", [10, 1], cond_np_data) - self.scope.new_var("SubScopes") - self.scope.new_var("IndexTensors") - self.scope.new_var("Out") + self.scope.var("SubScopes") + self.scope.var("IndexTensors") + self.scope.var("Out") def create_cond_op(self): self.condop = CondOp( diff --git a/python/paddle/v2/framework/tests/test_default_scope_funcs.py b/python/paddle/v2/framework/tests/test_default_scope_funcs.py index 495863c4562b5a2d6755fb02e21a6b0c845fd7b6..09a9850d054e3d7e6bf6db363fc577bdff8e9f43 100644 --- a/python/paddle/v2/framework/tests/test_default_scope_funcs.py +++ b/python/paddle/v2/framework/tests/test_default_scope_funcs.py @@ -10,7 +10,7 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertIsNone(find_var("test")) def test_create_var_get_var(self): - var_a = new_var("var_a") + var_a = var("var_a") self.assertIsNotNone(var_a) self.assertIsNotNone(get_cur_scope().find_var('var_a')) enter_local_scope() @@ -19,7 +19,7 @@ class TestDefaultScopeFuncs(unittest.TestCase): def test_var_get_int(self): def __new_scope__(): - i = new_var("var_i") + i = var("var_i") self.assertFalse(i.is_int()) i.set_int(10) self.assertTrue(i.is_int()) diff --git a/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py b/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py index b4629a3adb9a84470843214c7c6d80acde7228cc..2b01e43454e70c12b423db9925837cf336f79935 100644 --- a/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_dynamic_recurrent_op.py @@ -6,7 +6,7 @@ import numpy as np def create_tensor(scope, name, shape, np_data): - tensor = scope.new_var(name).get_tensor() + tensor = scope.var(name).get_tensor() tensor.set_dims(shape) tensor.set(np_data, core.CPUPlace()) return tensor @@ -72,8 +72,8 @@ class DynamicRecurrentOpTest(unittest.TestCase): create_tensor(self.scope, "U", [self.input_dim, self.input_dim], U) create_tensor(self.scope, "h_boot", [self.num_sents, self.input_dim], h_boot) - self.scope.new_var("step_scopes") - self.scope.new_var("h@mem") + self.scope.var("step_scopes") + self.scope.var("h@mem") def create_rnn_op(self): # create RNNOp diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index cff5080048bbd34782e52d8b2b7690176f996c99..8b7779667d5e806c06b333527f774c7987ce7e73 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -14,7 +14,7 @@ class TestGaussianRandomOp(unittest.TestCase): def gaussian_random_test(self, place): scope = core.Scope() - scope.new_var('Out').get_tensor() + scope.var('Out').get_tensor() op = Operator( "gaussian_random", diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py 
b/python/paddle/v2/framework/tests/test_infer_shape.py index 9d9fb1c3096ed5329e868235472fd610f0b2e6d3..19bb45acef9a7443a974bf5f11afab5d067321f7 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -13,14 +13,14 @@ class TestInferShape(unittest.TestCase): shape = [10, 20] # prepare input/output - x1 = block.new_var("x1") + x1 = block.var("x1") x1.set_type(core.VarDesc.VarType.LOD_TENSOR) x1.set_shape(shape) - x2 = block.new_var("x2") + x2 = block.var("x2") x2.set_type(core.VarDesc.VarType.LOD_TENSOR) x2.set_shape(shape) - out = block.new_var("out") + out = block.var("out") out.set_type(core.VarDesc.VarType.LOD_TENSOR) # prepare the operator @@ -42,14 +42,14 @@ class TestInferShape(unittest.TestCase): y_shape = [20, 30] # prepare input/output - x1 = block.new_var("x") + x1 = block.var("x") x1.set_type(core.VarDesc.VarType.LOD_TENSOR) x1.set_shape(x_shape) - x2 = block.new_var("y") + x2 = block.var("y") x2.set_type(core.VarDesc.VarType.LOD_TENSOR) x2.set_shape(y_shape) - out = block.new_var("out") + out = block.var("out") out.set_type(core.VarDesc.VarType.LOD_TENSOR) # prepare the operator diff --git a/python/paddle/v2/framework/tests/test_mnist.py b/python/paddle/v2/framework/tests/test_mnist.py index 169242b5372ebd28f102e0b450495524c712aabe..c8d54b7c94b7815fa79e5a11f4e159657dc2a6cb 100644 --- a/python/paddle/v2/framework/tests/test_mnist.py +++ b/python/paddle/v2/framework/tests/test_mnist.py @@ -31,7 +31,7 @@ uniq_id = atomic_id().next def data_layer(name, dims): - var = scope.new_var(name) + var = scope.var(name) tensor = var.get_tensor() tensor.set_dims(dims) # 1 is batch size holder. return name @@ -67,7 +67,7 @@ def sgd_optimizer(net, param_name, learning_rate=0.005): # should use operator and add these to the init_network def init_param(net, param_name, dims): - scope.new_var(param_name) + scope.var(param_name) op = Operator( "uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10) op.infer_shape(scope) @@ -104,7 +104,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None): sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01) pre_activation = name + ".mul.out" - scope.new_var(pre_activation) + scope.var(pre_activation) mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation) net.append_op(mul_op) @@ -115,7 +115,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None): sgd_optimizer( net=optimize_net, param_name=bias_name, learning_rate=0.001) bias_out = name + ".rowwise_add.out" - scope.new_var(bias_out) + scope.var(bias_out) rowwise_append_op = Operator( "rowwise_add", X=pre_activation, b=bias_name, Out=bias_out) net.append_op(rowwise_append_op) @@ -123,7 +123,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None): activation_op = Operator(act, X=pre_activation, Y=name) net.append_op(activation_op) - scope.new_var(name) + scope.var(name) net.infer_shape(scope) return name @@ -133,7 +133,7 @@ def cross_entropy_layer(net, input, label): cross_entropy_op = Operator( "cross_entropy", X=input, Label=label, Y=cost_name) net.append_op(cross_entropy_op) - scope.new_var(cost_name) + scope.var(cost_name) net.infer_shape(scope) return cost_name @@ -141,10 +141,10 @@ def cross_entropy_layer(net, input, label): def create_backward_net(forward_net): net = core.Operator.backward(forward_net, set()) for input in net.inputs()["all"]: - var = scope.new_var(input) + var = scope.var(input) 
var.get_tensor() for output in net.outputs()["all"]: - var = scope.new_var(output) + var = scope.var(output) var.get_tensor() return net diff --git a/python/paddle/v2/framework/tests/test_program.py b/python/paddle/v2/framework/tests/test_program.py index c5674382a484a91268e0139ba5588b123531210e..07473d17f76b724b35c49b1a713beeb30d251088 100644 --- a/python/paddle/v2/framework/tests/test_program.py +++ b/python/paddle/v2/framework/tests/test_program.py @@ -51,7 +51,7 @@ class TestProgram(unittest.TestCase): sum_op_desc.set_input("Y", ["b1"]) sum_op_desc.set_output("Out", ["out2"]) - target = block.new_var("out2") + target = block.var("out2") expect_ops = [ "mul", "elementwise_add", "fill_constant", "elementwise_add_grad", diff --git a/python/paddle/v2/framework/tests/test_protobuf_descs.py b/python/paddle/v2/framework/tests/test_protobuf_descs.py index 9b3a21261f02bf90617e60baca6902137520b8bf..c775b1a398dabb096845b4a8730152c682b2f0dd 100644 --- a/python/paddle/v2/framework/tests/test_protobuf_descs.py +++ b/python/paddle/v2/framework/tests/test_protobuf_descs.py @@ -93,7 +93,7 @@ class TestVarDesc(unittest.TestCase): def test_shape(self): program_desc = core.ProgramDesc.__create_program_desc__() block = program_desc.block(0) - var = block.new_var('my_var') + var = block.var('my_var') var.set_type(core.VarDesc.VarType.SELECTED_ROWS) src_shape = [3, 2, 10, 8] var.set_shape(src_shape) @@ -104,7 +104,7 @@ class TestVarDesc(unittest.TestCase): def test_data_type(self): program_desc = core.ProgramDesc.__create_program_desc__() block = program_desc.block(0) - var = block.new_var('my_var') + var = block.var('my_var') var.set_type(core.VarDesc.VarType.LOD_TENSOR) var.set_data_type(core.DataType.INT32) self.assertEqual(core.DataType.INT32, var.data_type()) @@ -117,12 +117,12 @@ class TestBlockDesc(unittest.TestCase): self.assertIsNotNone(prog) block = prog.block(0) self.assertIsNotNone(block) - var1 = block.new_var("var1") - var2 = block.new_var("var2") - var3 = block.new_var("var3") + var1 = block.var("var1") + var2 = block.var("var2") + var3 = block.var("var3") all_vars = block.all_vars() self.assertEqual(set(all_vars), set([var1, var2, var3])) - var2_re = block.var("var2") + var2_re = block.find_var("var2") self.assertEqual(var2_re, var2) def test_add_op(self): diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 1f114432c09f29fab6cd56de00dff341785ae0e4..191ce0b0c8d5fb6c4d8037a6c1bfda57c394489e 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -66,7 +66,7 @@ class PySimpleRNNTest(unittest.TestCase): def create_tensor(scope, name, shape, np_data): - tensor = scope.new_var(name).get_tensor() + tensor = scope.var(name).get_tensor() tensor.set_dims(shape) tensor.set(np_data, core.CPUPlace()) return tensor @@ -125,8 +125,8 @@ class RecurrentOpTest(unittest.TestCase): h_boot_np_data = self.py_rnn.h_boot create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim], h_boot_np_data) - self.scope.new_var("step_scopes") - self.scope.new_var("h@mem") + self.scope.var("step_scopes") + self.scope.var("h@mem") def create_rnn_op(self): # create RNNOp diff --git a/python/paddle/v2/framework/tests/test_scope.py b/python/paddle/v2/framework/tests/test_scope.py index 1ce9454067f91f39f01d9eb4c912857464a3c1cb..14743654792716e4a7ebce5238b142addc86337e 100644 --- a/python/paddle/v2/framework/tests/test_scope.py +++ 
b/python/paddle/v2/framework/tests/test_scope.py @@ -18,7 +18,7 @@ class TestScope(unittest.TestCase): def test_create_var_get_var(self): paddle_c = paddle.v2.framework.core scope = paddle_c.Scope() - var_a = scope.new_var("var_a") + var_a = scope.var("var_a") self.assertIsNotNone(var_a) self.assertIsNotNone(scope.find_var('var_a')) scope2 = scope.new_scope() @@ -27,7 +27,7 @@ class TestScope(unittest.TestCase): def test_var_get_int(self): paddle_c = paddle.v2.framework.core scope = paddle_c.Scope() - var = scope.new_var("test_int") + var = scope.var("test_int") var.set_int(10) self.assertTrue(var.is_int()) self.assertEqual(10, var.get_int()) diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index 8cd93b35d7d1cb7d3b4a19e0e402ef576f1c0982..e0cd2fa8aaf2db2991ad2b9a3053f0d00b509cd4 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -6,7 +6,7 @@ import numpy class TestTensor(unittest.TestCase): def test_int_tensor(self): scope = core.Scope() - var = scope.new_var("test_tensor") + var = scope.var("test_tensor") place = core.CPUPlace() tensor = var.get_tensor() @@ -25,7 +25,7 @@ class TestTensor(unittest.TestCase): def test_float_tensor(self): scope = core.Scope() - var = scope.new_var("test_tensor") + var = scope.var("test_tensor") place = core.CPUPlace() tensor = var.get_tensor() @@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase): def test_int_lod_tensor(self): place = core.CPUPlace() scope = core.Scope() - var_lod = scope.new_var("test_lod_tensor") + var_lod = scope.var("test_lod_tensor") lod_tensor = var_lod.get_tensor() lod_tensor.set_dims([4, 4, 6]) @@ -68,7 +68,7 @@ class TestTensor(unittest.TestCase): def test_float_lod_tensor(self): place = core.CPUPlace() scope = core.Scope() - var_lod = scope.new_var("test_lod_tensor") + var_lod = scope.var("test_lod_tensor") lod_tensor = var_lod.get_tensor() lod_tensor.set_dims([5, 2, 3, 4]) diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/framework/tests/test_tensor_array.py index 11f8a01f9224fcbd6dd6cbc8c37cc81036ad3e07..50b3e09162a24201ee45cbd017dfef8a60f0da78 100644 --- a/python/paddle/v2/framework/tests/test_tensor_array.py +++ b/python/paddle/v2/framework/tests/test_tensor_array.py @@ -13,7 +13,7 @@ class TestTensorArray(unittest.TestCase): # create a LoDTensor self.scope = core.Scope() - var = self.scope.new_var("test_tensor") + var = self.scope.var("test_tensor") self.place = core.CPUPlace() tensor = var.get_tensor() tensor.set_dims([self.batch_size, self.dim]) @@ -51,7 +51,7 @@ class TestTensorArray(unittest.TestCase): self.ta.unstack(self.tensor) # create a tensor with shape of [1, self.dim] - var = self.scope.new_var("hell") + var = self.scope.var("hell") tensor = var.get_tensor() tensor.set_dims([1, self.dim]) tensor.alloc_float(self.place) @@ -71,7 +71,7 @@ class TestTensorArray(unittest.TestCase): self.ta.unstack(self.tensor) # create a tensor with shape of [1, self.dim] - var = self.scope.new_var("hell") + var = self.scope.var("hell") tensor = var.get_tensor() tensor.set_dims([1, self.dim]) tensor.alloc_float(self.place) diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py index 30c59789d395b2b8d4b3019cf769c5bae029d91e..a2d28a65a67b03a6c74348b19ba99cffc55738e9 100644 --- a/python/paddle/v2/framework/tests/test_uniform_random_op.py +++ 
b/python/paddle/v2/framework/tests/test_uniform_random_op.py @@ -14,7 +14,7 @@ class TestUniformRandomOp(unittest.TestCase): def uniform_random_test(self, place): scope = core.Scope() - scope.new_var('X').get_tensor() + scope.var('X').get_tensor() op = Operator( "uniform_random",
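The compile-time side mirrors the runtime side: `BlockDescBind::Var` is now create-or-get and `BlockDescBind::FindVar` returns `nullptr` on a miss instead of raising `EnforceNotMet`, which is what lets framework.py above replace its try/except with a plain `if self.desc is None` test. Below is a hedged C++ sketch of that same find-then-create pattern; the helper name `GetOrCreateVar` is hypothetical, and the snippet assumes it is built inside the Paddle tree:

```cpp
#include <string>

#include "paddle/framework/block_desc.h"

namespace paddle {
namespace framework {

// Hypothetical helper (not part of the patch) mirroring the logic of
// Variable.__init__ in python/paddle/v2/framework/framework.py.
VarDescBind* GetOrCreateVar(BlockDescBind* block, const std::string& name,
                            bool* is_new_var) {
  // FindVar() reports a miss with nullptr rather than an exception.
  VarDescBind* var = block->FindVar(name);
  *is_new_var = (var == nullptr);
  // Var() is create-or-get, so the miss branch cannot collide with an
  // existing name; it always yields a usable VarDescBind.
  return *is_new_var ? block->Var(name) : var;
}

}  // namespace framework
}  // namespace paddle
```

Callers that do not care whether the variable already existed can skip the helper and call `block->Var(name)` directly, since the rename makes that call safe to repeat.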