From 4b6b4bc84a9c6e114b10ac8c5aa1d98effecd29d Mon Sep 17 00:00:00 2001
From: dongzhihong
Date: Wed, 11 Oct 2017 18:31:34 -0700
Subject: [PATCH] Rename GetOrCreateVar to Var

---
 doc/design/block.md | 2 +-
 doc/design/scope.md | 8 ++++----
 doc/design/tensor_array.md | 2 +-
 paddle/framework/block_desc.cc | 2 +-
 paddle/framework/block_desc.h | 2 +-
 paddle/framework/executor.cc | 4 ++--
 paddle/framework/executor_test.cc | 2 +-
 paddle/framework/scope.cc | 10 +++++-----
 paddle/framework/scope.h | 4 ++--
 paddle/operators/cond_op.cc | 2 +-
 paddle/operators/dynamic_recurrent_op.cc | 6 +++---
 paddle/operators/dynamic_recurrent_op_test.cc | 4 ++--
 paddle/operators/recurrent_op.cc | 12 +++++-------
 paddle/operators/rnn/recurrent_op_utils.cc | 2 +-
 paddle/pybind/protobuf.cc | 4 ++--
 paddle/pybind/pybind.cc | 4 ++--
 .../paddle/v2/framework/default_scope_funcs.py | 8 ++++----
 python/paddle/v2/framework/graph.py | 2 +-
 python/paddle/v2/framework/tests/op_test.py | 8 ++++----
 python/paddle/v2/framework/tests/test_cond_op.py | 8 ++++----
 .../framework/tests/test_default_scope_funcs.py | 4 ++--
 .../framework/tests/test_gaussian_random_op.py | 2 +-
 .../v2/framework/tests/test_infer_shape.py | 12 ++++++------
 python/paddle/v2/framework/tests/test_mnist.py | 16 ++++++++--------
 .../v2/framework/tests/test_protobuf_descs.py | 10 +++++-----
 .../v2/framework/tests/test_recurrent_op.py | 6 +++---
 python/paddle/v2/framework/tests/test_scope.py | 4 ++--
 python/paddle/v2/framework/tests/test_tensor.py | 8 ++++----
 .../v2/framework/tests/test_tensor_array.py | 6 +++---
 .../v2/framework/tests/test_uniform_random_op.py | 2 +-
 30 files changed, 82 insertions(+), 84 deletions(-)

diff --git a/doc/design/block.md b/doc/design/block.md
index 8f53f8d83c..7cbf0d55b1 100644
--- a/doc/design/block.md
+++ b/doc/design/block.md
@@ -243,7 +243,7 @@ class SymbolTable {
   // TODO determine whether name is generated by python or C++.
   // Currently assume that a unique name will be generated by C++ if the
   // argument name is left default.
-  VarDesc* GetOrCreateVar(const string& name="");
+  VarDesc* Var(const string& name="");
   // find a VarDesc by name, if recursive is true, find parent's SymbolTable
   // recursively.
diff --git a/doc/design/scope.md b/doc/design/scope.md
index 6a1a32a63e..4da76eebb7 100644
--- a/doc/design/scope.md
+++ b/doc/design/scope.md
@@ -37,7 +37,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
 ```cpp
 class Scope {
  public:
-  Variable* GetOrCreateVar(const std::string& name);
+  Variable* Var(const std::string& name);
   const Variable* FindVar(const std::string& name) const;
  private:
@@ -98,7 +98,7 @@ class Scope {
   Variable* FindVar(const std::string& name) const;
   // return if already contains same name variable.
-  Variable* GetOrCreateVar(const std::string& name);
+  Variable* Var(const std::string& name);
  private:
   std::shared_ptr parent_;
@@ -107,7 +107,7 @@
 ```
 ## Only scope can create a variable
-To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function, and Scope is a friend class of Variable. And then only `GetOrCreateVar` can construct `Variable`.
+To ensure `only scope can create a variable`, we should mark `Variable`'s constructor as a private member function and make `Scope` a friend class of `Variable`. Then only `Var` can construct a `Variable`.
 ## When scope destroyed, all variables inside this scope should be destroyed together
@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
 ## Orthogonal interface
-`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `GetOrCreateVar` will return an `Error` when there is a name conflict locally. Combine `FindVar` and `GetOrCreateVar`, we can implement `GetOrCreateVar` easily.
+`FindVar` will return `nullptr` when `name` is not found, so it can double as a `Contains` method. `Var` will return an `Error` when there is a name conflict locally. Combining `FindVar` and `Var`, we can implement `Var` easily.
diff --git a/doc/design/tensor_array.md b/doc/design/tensor_array.md
index 662d1850b2..37e4f7b90f 100644
--- a/doc/design/tensor_array.md
+++ b/doc/design/tensor_array.md
@@ -161,7 +161,7 @@ class TensorArray:
         @name: str
             the name of the variable to output.
         '''
-        tensor = GetOrCreateVar(name)
+        tensor = Var(name)
         tensor_array_stack(self.name, tensor)
         return tensor
diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index 1e580a045a..cbdebf1a65 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -18,7 +18,7 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
-VarDescBind *BlockDescBind::GetOrCreateVar(const std::string &name) {
+VarDescBind *BlockDescBind::Var(const std::string &name) {
   need_update_ = true;
   auto it = vars_.find(name);
   PADDLE_ENFORCE(it == vars_.end(), "Duplicated variable %s", name);
diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h
index c25a9b6e0b..40e6cb0967 100644
--- a/paddle/framework/block_desc.h
+++ b/paddle/framework/block_desc.h
@@ -48,7 +48,7 @@ class BlockDescBind {
   int32_t Parent() const { return desc_->parent_idx(); }
-  VarDescBind *GetOrCreateVar(const std::string &name_bytes);
+  VarDescBind *Var(const std::string &name_bytes);
   VarDescBind *Var(const std::string &name_bytes) const;
diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index f888fb73ca..8e82e28bac 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -66,7 +66,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
   // Instantiate all the vars in the global scope
   for (auto& var : block.vars()) {
-    scope->GetOrCreateVar(var.name());
+    scope->Var(var.name());
   }
   Scope& local_scope = scope->NewScope();
@@ -78,7 +78,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
     for (auto& var : block.ops(i).outputs()) {
       for (auto& argu : var.arguments()) {
         if (local_scope.FindVar(argu) == nullptr) {
-          local_scope.GetOrCreateVar(argu);
+          local_scope.Var(argu);
         }
       }
     }
diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc
index e9b0712706..34382c830f 100644
--- a/paddle/framework/executor_test.cc
+++ b/paddle/framework/executor_test.cc
@@ -34,7 +34,7 @@ void AddOp(const std::string& type, const VariableNameMap& inputs,
   // insert output
   for (auto kv : outputs) {
     for (auto v : kv.second) {
-      auto var = block->GetOrCreateVar(v);
+      auto var = block->Var(v);
       var->SetDataType(paddle::framework::DataType::FP32);
     }
   }
diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc
index df0d169038..8f8a53eec8 100644
--- a/paddle/framework/scope.cc
+++ b/paddle/framework/scope.cc
@@ -31,7 +31,7 @@ Scope& Scope::NewScope() const {
   return *kids_.back();
 }
-Variable* Scope::GetOrCreateVar(const std::string& name) {
+Variable* Scope::Var(const std::string& name) {
   auto iter = vars_.find(name);
   if (iter != vars_.end()) {
     return iter->second;
   }
@@ -42,8 +42,8 @@ Variable* Scope::GetOrCreateVar(const std::string& name) {
   return v;
 }
-Variable* Scope::GetOrCreateVar() {
-  return GetOrCreateVar(string::Sprintf("%p.%d", this, vars_.size()));
+Variable* Scope::Var() {
+  return Var(string::Sprintf("%p.%d", this, vars_.size()));
 }
 Variable* Scope::FindVar(const std::string& name) const {
@@ -71,8 +71,8 @@ framework::Scope& GetGlobalScope() {
   static std::unique_ptr g_scope{nullptr};
   std::call_once(feed_variable_flag, [&]() {
     g_scope.reset(new framework::Scope());
-    g_scope->GetOrCreateVar("feed_value");
-    g_scope->GetOrCreateVar("fetch_value");
+    g_scope->Var("feed_value");
+    g_scope->Var("fetch_value");
   });
   return *(g_scope.get());
 }
diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h
index 5cc1700651..a7fce3514b 100644
--- a/paddle/framework/scope.h
+++ b/paddle/framework/scope.h
@@ -45,10 +45,10 @@ class Scope {
   Scope& NewScope() const;
   /// Create a variable with given name if it doesn't exist.
-  Variable* GetOrCreateVar(const std::string& name);
+  Variable* Var(const std::string& name);
   /// Create a variable with a scope-unique name.
-  Variable* GetOrCreateVar();
+  Variable* Var();
   /// Find a variable in the scope or any of its ancestors. Returns
   /// nullptr if cannot find.
diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc
index e82d643ef9..adcd867f50 100644
--- a/paddle/operators/cond_op.cc
+++ b/paddle/operators/cond_op.cc
@@ -134,7 +134,7 @@ void CondOp::PrepareDataForSubnet(
   for (int i = 0; i < BRANCH_NUM; ++i) {
     for (auto& output : (*sub_net_op_[i]).Outputs()) {
       for (auto& var_name : output.second) {
-        sub_scopes[i]->GetOrCreateVar(var_name);
+        sub_scopes[i]->Var(var_name);
       }
     }
   }
diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc
index 8a8a623f28..f0f7265ce9 100644
--- a/paddle/operators/dynamic_recurrent_op.cc
+++ b/paddle/operators/dynamic_recurrent_op.cc
@@ -29,7 +29,7 @@ namespace detail {
 inline void CreateVariables(Scope& scope,
                             const std::vector& var_names) {
   for (const auto& name : var_names) {
-    scope.GetOrCreateVar(name);
+    scope.Var(name);
   }
 }
@@ -112,7 +112,7 @@ void DynamicRecurrentOp::WriteStepInputs() const {
       auto& step_scope = cache_.GetScope(step);
       Variable* var = step_scope.FindVar(item.first);
       if (var == nullptr) {
-        var = step_scope.GetOrCreateVar(item.first);
+        var = step_scope.Var(item.first);
       }
       var->GetMutable()->ShareDataWith(tensor);
     }
@@ -125,7 +125,7 @@ void DynamicRecurrentOp::WriteStepOutputs() const {
     for (auto& item : step_outputs_) {
       auto* var = scope.FindVar(item.first);
       if (var == nullptr) {
-        var = scope.GetOrCreateVar(item.first);
+        var = scope.Var(item.first);
       }
       auto* tensor = var->GetMutable();
       item.second.WriteShared(step, *tensor);
diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc
index 3ab5958835..d8ba0bc9f1 100644
--- a/paddle/operators/dynamic_recurrent_op_test.cc
+++ b/paddle/operators/dynamic_recurrent_op_test.cc
@@ -36,7 +36,7 @@ void OpDescNewVar(const std::string& param_name,
 // create a LoD tensor in scope with specific dims
 LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims,
                      const platform::Place& place) {
-  auto* var = scope.GetOrCreateVar(name);
+  auto* var = scope.Var(name);
   auto* tensor = var->GetMutable();
   tensor->Resize(dims);
   tensor->mutable_data(place);
@@ -85,7 +85,7 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test {
   void CreateGlobalVariables() {
     platform::CPUPlace place;
-    scope.GetOrCreateVar("step_scopes");
+    scope.Var("step_scopes");
     CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place);
     // auto* out0 =
     CreateVar(scope, "out0", framework::make_ddim({10, 20}), place);
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 261c7b8ca0..e3d08378c2 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -70,14 +70,14 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
       // the weight are located in parent scope
       for (auto& var_name : input.second) {
         if (!step_scope.FindVar(var_name)) {
-          step_scope.GetOrCreateVar(var_name)->GetMutable();
+          step_scope.Var(var_name)->GetMutable();
         }
       }
     }
     // create stepnet's outputs
     for (const auto& output : (*stepnet_)->Outputs()) {
       for (auto& var_name : output.second) {
-        step_scope.GetOrCreateVar(var_name);
+        step_scope.Var(var_name);
       }
     }
     step_scopes->emplace_back(&step_scope);
@@ -87,8 +87,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
 void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
-    auto* pre_mem =
-        step_scope->GetOrCreateVar(attr.pre_var)->GetMutable();
+    auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable();
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                    "memory [%s]'s boot variable [%s] not exists", attr.var,
                    attr.boot_var);
@@ -168,10 +167,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
                    "memory variable [%s] does not exists", attr.var);
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
                    "boot variable [%s] does not exists", attr.boot_var);
-    auto* mem_grad =
-        step_scope->GetOrCreateVar(attr.var)->GetMutable();
+    auto* mem_grad = step_scope->Var(attr.var)->GetMutable();
     auto* boot_mem_grad =
-        step_scope->GetOrCreateVar(attr.boot_var)->GetMutable();
+        step_scope->Var(attr.boot_var)->GetMutable();
     boot_mem_grad->Resize(mem_grad->dims());
     boot_mem_grad->ShareDataWith(*mem_grad);
   }
diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc
index 1d5f2d73ab..30b8ddeb5b 100644
--- a/paddle/operators/rnn/recurrent_op_utils.cc
+++ b/paddle/operators/rnn/recurrent_op_utils.cc
@@ -40,7 +40,7 @@ void SegmentInputs(const std::vector& step_scopes,
     f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
-          step_scopes[j]->GetOrCreateVar(inlinks[i])->GetMutable();
+          step_scopes[j]->Var(inlinks[i])->GetMutable();
       // The input of operators of each step is Tensor here.
       // Maybe need to modify Slice function.
       *step_input = input->Slice(j, j + 1);
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index dd6102bf38..3759e8d61f 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -134,10 +134,10 @@ void BindBlockDesc(py::module &m) {
            py::return_value_policy::reference)
       .def("prepend_op", &BlockDescBind::PrependOp,
            py::return_value_policy::reference)
-      .def("get_or_create",
+      .def("var",
           [](BlockDescBind &self, py::bytes byte_name) {
             std::string name = byte_name;
-            return self.GetOrCreateVar(name);
+            return self.Var(name);
           },
           py::return_value_policy::reference)
       .def("var",
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 60e80a5c9b..0fd356780b 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -163,9 +163,9 @@ All parameter, weight, gradient are variables in Paddle.
       py::return_value_policy::reference);
   py::class_(m, "Scope", "")
-      .def("get_or_create",
+      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
-            return self.GetOrCreateVar(name);
+            return self.Var(name);
           },
           py::return_value_policy::reference)
       .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
diff --git a/python/paddle/v2/framework/default_scope_funcs.py b/python/paddle/v2/framework/default_scope_funcs.py
index 3f3bedf61a..c07f9a6ab9 100644
--- a/python/paddle/v2/framework/default_scope_funcs.py
+++ b/python/paddle/v2/framework/default_scope_funcs.py
@@ -5,7 +5,7 @@ Default scope function.
 thread-local stack of Scope. Top of that stack is current scope, the bottom
 of that stack is all scopes' parent.
-Invoking `get_or_create/find_var` can `new/find` variable in current scope.
+Invoking `var/find_var` can `new/find` variable in current scope.
 Invoking `enter_local_scope/leave_local_scope` can create or destroy local
 scope.
@@ -19,7 +19,7 @@ import threading
 __tl_scope__ = threading.local()
 __all__ = [
-    'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'get_or_create',
+    'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var',
     'find_var', 'scoped_function'
 ]
@@ -54,11 +54,11 @@ def leave_local_scope():
     get_cur_scope().drop_kids()
-def get_or_create(name):
+def var(name):
     """
    create variable in current scope.
""" - return get_cur_scope().get_or_create(name) + return get_cur_scope().var(name) def find_var(name): diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index c4afda4148..31c6f97656 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -22,7 +22,7 @@ class Variable(object): self.desc = self.block.desc.var(name) is_new_var = False except core.EnforceNotMet: - self.desc = self.block.desc.get_or_create(name) + self.desc = self.block.desc.var(name) is_new_var = True if shape is not None: diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 364ce7baae..215fa0b94e 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -14,7 +14,7 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() def __create_var__(name, var_name): - scope.get_or_create(var_name) + scope.var(var_name) kwargs[name].append(var_name) for in_name, in_dup in Operator.get_op_inputs(op_type): @@ -71,7 +71,7 @@ def set_input(scope, op, inputs, place): def set_output_grad(scope, op, outputs, place): def __set_tensor__(name): out_tensor = scope.find_var(name).get_tensor() - grad_tensor = scope.get_or_create(grad_var_name(name)).get_tensor() + grad_tensor = scope.var(grad_var_name(name)).get_tensor() out_dtype = out_tensor.dtype() if out_dtype == core.DataType.FP64: data = np.ones(out_tensor.shape(), dtype=np.float64) @@ -169,10 +169,10 @@ def get_numeric_gradient(scope, def get_backward_op(scope, op, no_grad_set): backward_op = core.Operator.backward(op, no_grad_set) for input in backward_op.input_vars(): - var = scope.get_or_create(input) + var = scope.var(input) var.get_tensor() for output in backward_op.output_vars(): - var = scope.get_or_create(output) + var = scope.var(output) var.get_tensor() return backward_op diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py index 5029138cb9..2c7bcc4be4 100644 --- a/python/paddle/v2/framework/tests/test_cond_op.py +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -39,7 +39,7 @@ class PySimpleCondTest(unittest.TestCase): def create_tensor(scope, name, shape, np_data): - tensor = scope.get_or_create(name).get_tensor() + tensor = scope.var(name).get_tensor() tensor.set_dims(shape) tensor.set(np_data, core.CPUPlace()) return tensor @@ -74,9 +74,9 @@ class TestCondOp(unittest.TestCase): create_tensor(self.scope, "X", [10, 1], x_np_data) cond_np_data = self.py_cond.cond.astype("int32") create_tensor(self.scope, "cond", [10, 1], cond_np_data) - self.scope.get_or_create("SubScopes") - self.scope.get_or_create("IndexTensors") - self.scope.get_or_create("Out") + self.scope.var("SubScopes") + self.scope.var("IndexTensors") + self.scope.var("Out") def create_cond_op(self): self.condop = CondOp( diff --git a/python/paddle/v2/framework/tests/test_default_scope_funcs.py b/python/paddle/v2/framework/tests/test_default_scope_funcs.py index 2a3f766a81..09a9850d05 100644 --- a/python/paddle/v2/framework/tests/test_default_scope_funcs.py +++ b/python/paddle/v2/framework/tests/test_default_scope_funcs.py @@ -10,7 +10,7 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertIsNone(find_var("test")) def test_create_var_get_var(self): - var_a = get_or_create("var_a") + var_a = var("var_a") self.assertIsNotNone(var_a) self.assertIsNotNone(get_cur_scope().find_var('var_a')) enter_local_scope() @@ -19,7 +19,7 @@ class 
TestDefaultScopeFuncs(unittest.TestCase): def test_var_get_int(self): def __new_scope__(): - i = get_or_create("var_i") + i = var("var_i") self.assertFalse(i.is_int()) i.set_int(10) self.assertTrue(i.is_int()) diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index 6393210383..8b7779667d 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -14,7 +14,7 @@ class TestGaussianRandomOp(unittest.TestCase): def gaussian_random_test(self, place): scope = core.Scope() - scope.get_or_create('Out').get_tensor() + scope.var('Out').get_tensor() op = Operator( "gaussian_random", diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index b4451beefc..d23f13be49 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -13,12 +13,12 @@ class TestInferShape(unittest.TestCase): shape = [10, 20] # prepare input/output - x1 = block.get_or_create("x1") + x1 = block.var("x1") x1.set_shape(shape) - x2 = block.get_or_create("x2") + x2 = block.var("x2") x2.set_shape(shape) - out = block.get_or_create("out") + out = block.var("out") # prepare the operator sum_op_desc = block.append_op() @@ -39,12 +39,12 @@ class TestInferShape(unittest.TestCase): y_shape = [20, 30] # prepare input/output - x1 = block.get_or_create("x") + x1 = block.var("x") x1.set_shape(x_shape) - x2 = block.get_or_create("y") + x2 = block.var("y") x2.set_shape(y_shape) - out = block.get_or_create("out") + out = block.var("out") # prepare the operator mul_op_desc = block.append_op() diff --git a/python/paddle/v2/framework/tests/test_mnist.py b/python/paddle/v2/framework/tests/test_mnist.py index e0d2b67953..c8d54b7c94 100644 --- a/python/paddle/v2/framework/tests/test_mnist.py +++ b/python/paddle/v2/framework/tests/test_mnist.py @@ -31,7 +31,7 @@ uniq_id = atomic_id().next def data_layer(name, dims): - var = scope.get_or_create(name) + var = scope.var(name) tensor = var.get_tensor() tensor.set_dims(dims) # 1 is batch size holder. 
     return name
@@ -67,7 +67,7 @@ def sgd_optimizer(net, param_name, learning_rate=0.005):
 # should use operator and add these to the init_network
 def init_param(net, param_name, dims):
-    scope.get_or_create(param_name)
+    scope.var(param_name)
     op = Operator(
         "uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10)
     op.infer_shape(scope)
@@ -104,7 +104,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)
     pre_activation = name + ".mul.out"
-    scope.get_or_create(pre_activation)
+    scope.var(pre_activation)
     mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation)
     net.append_op(mul_op)
@@ -115,7 +115,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
         sgd_optimizer(
             net=optimize_net, param_name=bias_name, learning_rate=0.001)
         bias_out = name + ".rowwise_add.out"
-        scope.get_or_create(bias_out)
+        scope.var(bias_out)
         rowwise_append_op = Operator(
             "rowwise_add", X=pre_activation, b=bias_name, Out=bias_out)
         net.append_op(rowwise_append_op)
@@ -123,7 +123,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     activation_op = Operator(act, X=pre_activation, Y=name)
     net.append_op(activation_op)
-    scope.get_or_create(name)
+    scope.var(name)
     net.infer_shape(scope)
     return name
@@ -133,7 +133,7 @@ def cross_entropy_layer(net, input, label):
     cross_entropy_op = Operator(
         "cross_entropy", X=input, Label=label, Y=cost_name)
     net.append_op(cross_entropy_op)
-    scope.get_or_create(cost_name)
+    scope.var(cost_name)
     net.infer_shape(scope)
     return cost_name
@@ -141,10 +141,10 @@ def cross_entropy_layer(net, input, label):
 def create_backward_net(forward_net):
     net = core.Operator.backward(forward_net, set())
     for input in net.inputs()["all"]:
-        var = scope.get_or_create(input)
+        var = scope.var(input)
         var.get_tensor()
     for output in net.outputs()["all"]:
-        var = scope.get_or_create(output)
+        var = scope.var(output)
         var.get_tensor()
     return net
diff --git a/python/paddle/v2/framework/tests/test_protobuf_descs.py b/python/paddle/v2/framework/tests/test_protobuf_descs.py
index cbff8e9f98..5e7652faf9 100644
--- a/python/paddle/v2/framework/tests/test_protobuf_descs.py
+++ b/python/paddle/v2/framework/tests/test_protobuf_descs.py
@@ -93,7 +93,7 @@ class TestVarDesc(unittest.TestCase):
     def test_shape(self):
         program_desc = core.ProgramDesc.__create_program_desc__()
         block = program_desc.block(0)
-        var = block.get_or_create('my_var')
+        var = block.var('my_var')
         src_shape = [3, 2, 10, 8]
         var.set_shape(src_shape)
         res_shape = var.shape()
@@ -102,7 +102,7 @@ class TestVarDesc(unittest.TestCase):
     def test_data_type(self):
         program_desc = core.ProgramDesc.__create_program_desc__()
         block = program_desc.block(0)
-        var = block.get_or_create('my_var')
+        var = block.var('my_var')
         var.set_data_type(core.DataType.INT32)
         self.assertEqual(core.DataType.INT32, var.data_type())
@@ -113,9 +113,9 @@ class TestBlockDesc(unittest.TestCase):
         self.assertIsNotNone(prog)
         block = prog.block(0)
         self.assertIsNotNone(block)
-        var1 = block.get_or_create("var1")
-        var2 = block.get_or_create("var2")
-        var3 = block.get_or_create("var3")
+        var1 = block.var("var1")
+        var2 = block.var("var2")
+        var3 = block.var("var3")
         all_vars = block.all_vars()
         self.assertEqual(set(all_vars), set([var1, var2, var3]))
         var2_re = block.var("var2")
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py
index 267687f4b5..191ce0b0c8 100644
--- a/python/paddle/v2/framework/tests/test_recurrent_op.py
+++ b/python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -66,7 +66,7 @@ class PySimpleRNNTest(unittest.TestCase):
 def create_tensor(scope, name, shape, np_data):
-    tensor = scope.get_or_create(name).get_tensor()
+    tensor = scope.var(name).get_tensor()
     tensor.set_dims(shape)
     tensor.set(np_data, core.CPUPlace())
     return tensor
@@ -125,8 +125,8 @@ class RecurrentOpTest(unittest.TestCase):
         h_boot_np_data = self.py_rnn.h_boot
         create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
                       h_boot_np_data)
-        self.scope.get_or_create("step_scopes")
-        self.scope.get_or_create("h@mem")
+        self.scope.var("step_scopes")
+        self.scope.var("h@mem")
     def create_rnn_op(self):
         # create RNNOp
diff --git a/python/paddle/v2/framework/tests/test_scope.py b/python/paddle/v2/framework/tests/test_scope.py
index d32c4cf052..1474365479 100644
--- a/python/paddle/v2/framework/tests/test_scope.py
+++ b/python/paddle/v2/framework/tests/test_scope.py
@@ -18,7 +18,7 @@ class TestScope(unittest.TestCase):
     def test_create_var_get_var(self):
         paddle_c = paddle.v2.framework.core
         scope = paddle_c.Scope()
-        var_a = scope.get_or_create("var_a")
+        var_a = scope.var("var_a")
         self.assertIsNotNone(var_a)
         self.assertIsNotNone(scope.find_var('var_a'))
         scope2 = scope.new_scope()
@@ -27,7 +27,7 @@ class TestScope(unittest.TestCase):
     def test_var_get_int(self):
         paddle_c = paddle.v2.framework.core
         scope = paddle_c.Scope()
-        var = scope.get_or_create("test_int")
+        var = scope.var("test_int")
         var.set_int(10)
         self.assertTrue(var.is_int())
         self.assertEqual(10, var.get_int())
diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py
index c8eea18609..e0cd2fa8aa 100644
--- a/python/paddle/v2/framework/tests/test_tensor.py
+++ b/python/paddle/v2/framework/tests/test_tensor.py
@@ -6,7 +6,7 @@ import numpy
 class TestTensor(unittest.TestCase):
     def test_int_tensor(self):
         scope = core.Scope()
-        var = scope.get_or_create("test_tensor")
+        var = scope.var("test_tensor")
         place = core.CPUPlace()
         tensor = var.get_tensor()
@@ -25,7 +25,7 @@ class TestTensor(unittest.TestCase):
     def test_float_tensor(self):
         scope = core.Scope()
-        var = scope.get_or_create("test_tensor")
+        var = scope.var("test_tensor")
         place = core.CPUPlace()
         tensor = var.get_tensor()
@@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase):
     def test_int_lod_tensor(self):
         place = core.CPUPlace()
         scope = core.Scope()
-        var_lod = scope.get_or_create("test_lod_tensor")
+        var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
         lod_tensor.set_dims([4, 4, 6])
@@ -68,7 +68,7 @@ class TestTensor(unittest.TestCase):
     def test_float_lod_tensor(self):
         place = core.CPUPlace()
         scope = core.Scope()
-        var_lod = scope.get_or_create("test_lod_tensor")
+        var_lod = scope.var("test_lod_tensor")
         lod_tensor = var_lod.get_tensor()
         lod_tensor.set_dims([5, 2, 3, 4])
diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/framework/tests/test_tensor_array.py
index c9b0756d26..50b3e09162 100644
--- a/python/paddle/v2/framework/tests/test_tensor_array.py
+++ b/python/paddle/v2/framework/tests/test_tensor_array.py
@@ -13,7 +13,7 @@ class TestTensorArray(unittest.TestCase):
         # create a LoDTensor
         self.scope = core.Scope()
-        var = self.scope.get_or_create("test_tensor")
+        var = self.scope.var("test_tensor")
         self.place = core.CPUPlace()
         tensor = var.get_tensor()
         tensor.set_dims([self.batch_size, self.dim])
@@ -51,7 +51,7 @@ class TestTensorArray(unittest.TestCase):
         self.ta.unstack(self.tensor)
         # create a tensor with shape of [1, self.dim]
-        var = self.scope.get_or_create("hell")
+        var = self.scope.var("hell")
         tensor = var.get_tensor()
         tensor.set_dims([1, self.dim])
         tensor.alloc_float(self.place)
@@ -71,7 +71,7 @@ class TestTensorArray(unittest.TestCase):
         self.ta.unstack(self.tensor)
         # create a tensor with shape of [1, self.dim]
-        var = self.scope.get_or_create("hell")
+        var = self.scope.var("hell")
         tensor = var.get_tensor()
         tensor.set_dims([1, self.dim])
         tensor.alloc_float(self.place)
diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py
index b9cbcee7e0..a2d28a65a6 100644
--- a/python/paddle/v2/framework/tests/test_uniform_random_op.py
+++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py
@@ -14,7 +14,7 @@ class TestUniformRandomOp(unittest.TestCase):
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.get_or_create('X').get_tensor()
+        scope.var('X').get_tensor()
         op = Operator(
             "uniform_random",
--
GitLab
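
For reference, this is roughly how the renamed interface reads once the patch is applied. The snippet below is an illustrative sketch only, not part of the patch; it assumes the `Scope` API declared in `paddle/framework/scope.h` above, where `Var(name)` creates the variable on first use and returns the existing one afterwards, the no-argument `Var()` picks a scope-unique name, and `FindVar(name)` returns `nullptr` when the name is unknown.

```cpp
#include "paddle/framework/scope.h"

using paddle::framework::Scope;
using paddle::framework::Variable;

int main() {
  Scope scope;

  // Var() replaces the old GetOrCreateVar(): it creates "fc.w" the first
  // time and returns the same Variable* on later calls.
  Variable* w = scope.Var("fc.w");

  // FindVar() only looks a name up; the nullptr result doubles as a
  // Contains() check, as doc/design/scope.md describes.
  bool has_w = scope.FindVar("fc.w") != nullptr;

  // The no-argument overload creates a variable with a scope-unique name.
  Variable* tmp = scope.Var();

  // Child scopes created by NewScope() fall back to ancestors on lookup.
  Scope& child = scope.NewScope();
  child.Var("fc.b");

  (void)w;
  (void)has_w;
  (void)tmp;
  return 0;
}
```

The Python binding follows the same pairing after this change: `scope.var(name)` creates a variable and `scope.find_var(name)` looks it up, as the updated tests above show.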