Commit 5d6a3eee authored by D dongzhihong

Rename new_var to get_or_create

Parent 0af45b5f
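In effect, every call site that used to create a variable through new_var now goes through get_or_create. A minimal usage sketch of the renamed bindings, assuming a locally built paddle.v2.framework.core; the variable names and shapes are illustrative, and the behavior shown is only what the tests in this diff exercise:

```python
import paddle.v2.framework.core as core

# Scope binding: ask for a variable by name under the renamed method.
scope = core.Scope()
var_a = scope.get_or_create("var_a")        # previously: scope.new_var("var_a")
assert scope.find_var("var_a") is not None

# BlockDesc binding follows the same rename.
prog = core.ProgramDesc.__create_program_desc__()
block = prog.block(0)
x1 = block.get_or_create("x1")              # previously: block.new_var("x1")
x1.set_shape([10, 20])
```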
......@@ -134,7 +134,7 @@ void BindBlockDesc(py::module &m) {
py::return_value_policy::reference)
.def("prepend_op", &BlockDescBind::PrependOp,
py::return_value_policy::reference)
.def("new_var",
.def("get_or_create",
[](BlockDescBind &self, py::bytes byte_name) {
std::string name = byte_name;
return self.GetOrCreateVar(name);
......
......@@ -163,7 +163,7 @@ All parameter, weight, gradient are variables in Paddle.
py::return_value_policy::reference);
py::class_<Scope>(m, "Scope", "")
.def("new_var",
.def("get_or_create",
[](Scope &self, const std::string &name) -> Variable * {
return self.GetOrCreateVar(name);
},
......
......@@ -5,7 +5,7 @@ Default scope function.
thread-local stack of Scope. Top of that stack is current scope, the bottom
of that stack is all scopes' parent.
Invoking `new_var/find_var` can `new/find` variable in current scope.
Invoking `get_or_create/find_var` can `new/find` variable in current scope.
Invoking `enter_local_scope/leave_local_scope` can create or destroy local
scope.
......@@ -19,7 +19,7 @@ import threading
__tl_scope__ = threading.local()
__all__ = [
'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'new_var',
'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'get_or_create',
'find_var', 'scoped_function'
]
......@@ -54,11 +54,11 @@ def leave_local_scope():
get_cur_scope().drop_kids()
def new_var(name):
def get_or_create(name):
"""
create variable in current scope.
"""
return get_cur_scope().new_var(name)
return get_cur_scope().get_or_create(name)
def find_var(name):
......
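For the module-level helpers whose docstring and __all__ list change above, a short sketch of the renamed entry point; the import path paddle.v2.framework.default_scope_funcs is inferred from the package used elsewhere in this diff and is not shown in the hunk itself:

```python
from paddle.v2.framework.default_scope_funcs import (
    get_cur_scope, enter_local_scope, leave_local_scope, get_or_create, find_var)

# The helpers keep a thread-local stack of scopes; only the creation
# helper is renamed from new_var to get_or_create.
var_i = get_or_create("var_i")              # previously: new_var("var_i")
assert find_var("var_i") is not None
assert get_cur_scope().find_var("var_i") is not None

# Local scopes still nest and unwind the same way as before the rename.
enter_local_scope()
leave_local_scope()
```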
......@@ -22,7 +22,7 @@ class Variable(object):
self.desc = self.block.desc.var(name)
is_new_var = False
except core.EnforceNotMet:
self.desc = self.block.desc.new_var(name)
self.desc = self.block.desc.get_or_create(name)
is_new_var = True
if shape is not None:
......
......@@ -14,7 +14,7 @@ def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
def __create_var__(name, var_name):
scope.new_var(var_name)
scope.get_or_create(var_name)
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
......@@ -71,7 +71,7 @@ def set_input(scope, op, inputs, place):
def set_output_grad(scope, op, outputs, place):
def __set_tensor__(name):
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
grad_tensor = scope.get_or_create(grad_var_name(name)).get_tensor()
out_dtype = out_tensor.dtype()
if out_dtype == core.DataType.FP64:
data = np.ones(out_tensor.shape(), dtype=np.float64)
......@@ -169,10 +169,10 @@ def get_numeric_gradient(scope,
def get_backward_op(scope, op, no_grad_set):
backward_op = core.Operator.backward(op, no_grad_set)
for input in backward_op.input_vars():
var = scope.new_var(input)
var = scope.get_or_create(input)
var.get_tensor()
for output in backward_op.output_vars():
var = scope.new_var(output)
var = scope.get_or_create(output)
var.get_tensor()
return backward_op
......
......@@ -39,7 +39,7 @@ class PySimpleCondTest(unittest.TestCase):
def create_tensor(scope, name, shape, np_data):
tensor = scope.new_var(name).get_tensor()
tensor = scope.get_or_create(name).get_tensor()
tensor.set_dims(shape)
tensor.set(np_data, core.CPUPlace())
return tensor
......@@ -74,9 +74,9 @@ class TestCondOp(unittest.TestCase):
create_tensor(self.scope, "X", [10, 1], x_np_data)
cond_np_data = self.py_cond.cond.astype("int32")
create_tensor(self.scope, "cond", [10, 1], cond_np_data)
self.scope.new_var("SubScopes")
self.scope.new_var("IndexTensors")
self.scope.new_var("Out")
self.scope.get_or_create("SubScopes")
self.scope.get_or_create("IndexTensors")
self.scope.get_or_create("Out")
def create_cond_op(self):
self.condop = CondOp(
......
......@@ -10,7 +10,7 @@ class TestDefaultScopeFuncs(unittest.TestCase):
self.assertIsNone(find_var("test"))
def test_create_var_get_var(self):
var_a = new_var("var_a")
var_a = get_or_create("var_a")
self.assertIsNotNone(var_a)
self.assertIsNotNone(get_cur_scope().find_var('var_a'))
enter_local_scope()
......@@ -19,7 +19,7 @@ class TestDefaultScopeFuncs(unittest.TestCase):
def test_var_get_int(self):
def __new_scope__():
i = new_var("var_i")
i = get_or_create("var_i")
self.assertFalse(i.is_int())
i.set_int(10)
self.assertTrue(i.is_int())
......
......@@ -14,7 +14,7 @@ class TestGaussianRandomOp(unittest.TestCase):
def gaussian_random_test(self, place):
scope = core.Scope()
scope.new_var('Out').get_tensor()
scope.get_or_create('Out').get_tensor()
op = Operator(
"gaussian_random",
......
......@@ -13,12 +13,12 @@ class TestInferShape(unittest.TestCase):
shape = [10, 20]
# prepare input/output
x1 = block.new_var("x1")
x1 = block.get_or_create("x1")
x1.set_shape(shape)
x2 = block.new_var("x2")
x2 = block.get_or_create("x2")
x2.set_shape(shape)
out = block.new_var("out")
out = block.get_or_create("out")
# prepare the operator
sum_op_desc = block.append_op()
......@@ -39,12 +39,12 @@ class TestInferShape(unittest.TestCase):
y_shape = [20, 30]
# prepare input/output
x1 = block.new_var("x")
x1 = block.get_or_create("x")
x1.set_shape(x_shape)
x2 = block.new_var("y")
x2 = block.get_or_create("y")
x2.set_shape(y_shape)
out = block.new_var("out")
out = block.get_or_create("out")
# prepare the operator
mul_op_desc = block.append_op()
......
......@@ -31,7 +31,7 @@ uniq_id = atomic_id().next
def data_layer(name, dims):
var = scope.new_var(name)
var = scope.get_or_create(name)
tensor = var.get_tensor()
tensor.set_dims(dims) # 1 is batch size holder.
return name
......@@ -67,7 +67,7 @@ def sgd_optimizer(net, param_name, learning_rate=0.005):
# should use operator and add these to the init_network
def init_param(net, param_name, dims):
scope.new_var(param_name)
scope.get_or_create(param_name)
op = Operator(
"uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10)
op.infer_shape(scope)
......@@ -104,7 +104,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)
pre_activation = name + ".mul.out"
scope.new_var(pre_activation)
scope.get_or_create(pre_activation)
mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation)
net.append_op(mul_op)
......@@ -115,7 +115,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
sgd_optimizer(
net=optimize_net, param_name=bias_name, learning_rate=0.001)
bias_out = name + ".rowwise_add.out"
scope.new_var(bias_out)
scope.get_or_create(bias_out)
rowwise_append_op = Operator(
"rowwise_add", X=pre_activation, b=bias_name, Out=bias_out)
net.append_op(rowwise_append_op)
......@@ -123,7 +123,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
activation_op = Operator(act, X=pre_activation, Y=name)
net.append_op(activation_op)
scope.new_var(name)
scope.get_or_create(name)
net.infer_shape(scope)
return name
......@@ -133,7 +133,7 @@ def cross_entropy_layer(net, input, label):
cross_entropy_op = Operator(
"cross_entropy", X=input, Label=label, Y=cost_name)
net.append_op(cross_entropy_op)
scope.new_var(cost_name)
scope.get_or_create(cost_name)
net.infer_shape(scope)
return cost_name
......@@ -141,10 +141,10 @@ def cross_entropy_layer(net, input, label):
def create_backward_net(forward_net):
net = core.Operator.backward(forward_net, set())
for input in net.inputs()["all"]:
var = scope.new_var(input)
var = scope.get_or_create(input)
var.get_tensor()
for output in net.outputs()["all"]:
var = scope.new_var(output)
var = scope.get_or_create(output)
var.get_tensor()
return net
......
......@@ -93,7 +93,7 @@ class TestVarDesc(unittest.TestCase):
def test_shape(self):
program_desc = core.ProgramDesc.__create_program_desc__()
block = program_desc.block(0)
var = block.new_var('my_var')
var = block.get_or_create('my_var')
src_shape = [3, 2, 10, 8]
var.set_shape(src_shape)
res_shape = var.shape()
......@@ -102,7 +102,7 @@ class TestVarDesc(unittest.TestCase):
def test_data_type(self):
program_desc = core.ProgramDesc.__create_program_desc__()
block = program_desc.block(0)
var = block.new_var('my_var')
var = block.get_or_create('my_var')
var.set_data_type(core.DataType.INT32)
self.assertEqual(core.DataType.INT32, var.data_type())
......@@ -113,9 +113,9 @@ class TestBlockDesc(unittest.TestCase):
self.assertIsNotNone(prog)
block = prog.block(0)
self.assertIsNotNone(block)
var1 = block.new_var("var1")
var2 = block.new_var("var2")
var3 = block.new_var("var3")
var1 = block.get_or_create("var1")
var2 = block.get_or_create("var2")
var3 = block.get_or_create("var3")
all_vars = block.all_vars()
self.assertEqual(set(all_vars), set([var1, var2, var3]))
var2_re = block.var("var2")
......
......@@ -66,7 +66,7 @@ class PySimpleRNNTest(unittest.TestCase):
def create_tensor(scope, name, shape, np_data):
tensor = scope.new_var(name).get_tensor()
tensor = scope.get_or_create(name).get_tensor()
tensor.set_dims(shape)
tensor.set(np_data, core.CPUPlace())
return tensor
......@@ -125,8 +125,8 @@ class RecurrentOpTest(unittest.TestCase):
h_boot_np_data = self.py_rnn.h_boot
create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
h_boot_np_data)
self.scope.new_var("step_scopes")
self.scope.new_var("h@mem")
self.scope.get_or_create("step_scopes")
self.scope.get_or_create("h@mem")
def create_rnn_op(self):
# create RNNOp
......
......@@ -18,7 +18,7 @@ class TestScope(unittest.TestCase):
def test_create_var_get_var(self):
paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope()
var_a = scope.new_var("var_a")
var_a = scope.get_or_create("var_a")
self.assertIsNotNone(var_a)
self.assertIsNotNone(scope.find_var('var_a'))
scope2 = scope.new_scope()
......@@ -27,7 +27,7 @@ class TestScope(unittest.TestCase):
def test_var_get_int(self):
paddle_c = paddle.v2.framework.core
scope = paddle_c.Scope()
var = scope.new_var("test_int")
var = scope.get_or_create("test_int")
var.set_int(10)
self.assertTrue(var.is_int())
self.assertEqual(10, var.get_int())
......
......@@ -6,7 +6,7 @@ import numpy
class TestTensor(unittest.TestCase):
def test_int_tensor(self):
scope = core.Scope()
var = scope.new_var("test_tensor")
var = scope.get_or_create("test_tensor")
place = core.CPUPlace()
tensor = var.get_tensor()
......@@ -25,7 +25,7 @@ class TestTensor(unittest.TestCase):
def test_float_tensor(self):
scope = core.Scope()
var = scope.new_var("test_tensor")
var = scope.get_or_create("test_tensor")
place = core.CPUPlace()
tensor = var.get_tensor()
......@@ -46,7 +46,7 @@ class TestTensor(unittest.TestCase):
def test_int_lod_tensor(self):
place = core.CPUPlace()
scope = core.Scope()
var_lod = scope.new_var("test_lod_tensor")
var_lod = scope.get_or_create("test_lod_tensor")
lod_tensor = var_lod.get_tensor()
lod_tensor.set_dims([4, 4, 6])
......@@ -68,7 +68,7 @@ class TestTensor(unittest.TestCase):
def test_float_lod_tensor(self):
place = core.CPUPlace()
scope = core.Scope()
var_lod = scope.new_var("test_lod_tensor")
var_lod = scope.get_or_create("test_lod_tensor")
lod_tensor = var_lod.get_tensor()
lod_tensor.set_dims([5, 2, 3, 4])
......
......@@ -13,7 +13,7 @@ class TestTensorArray(unittest.TestCase):
# create a LoDTensor
self.scope = core.Scope()
var = self.scope.new_var("test_tensor")
var = self.scope.get_or_create("test_tensor")
self.place = core.CPUPlace()
tensor = var.get_tensor()
tensor.set_dims([self.batch_size, self.dim])
......@@ -51,7 +51,7 @@ class TestTensorArray(unittest.TestCase):
self.ta.unstack(self.tensor)
# create a tensor with shape of [1, self.dim]
var = self.scope.new_var("hell")
var = self.scope.get_or_create("hell")
tensor = var.get_tensor()
tensor.set_dims([1, self.dim])
tensor.alloc_float(self.place)
......@@ -71,7 +71,7 @@ class TestTensorArray(unittest.TestCase):
self.ta.unstack(self.tensor)
# create a tensor with shape of [1, self.dim]
var = self.scope.new_var("hell")
var = self.scope.get_or_create("hell")
tensor = var.get_tensor()
tensor.set_dims([1, self.dim])
tensor.alloc_float(self.place)
......
......@@ -14,7 +14,7 @@ class TestUniformRandomOp(unittest.TestCase):
def uniform_random_test(self, place):
scope = core.Scope()
scope.new_var('X').get_tensor()
scope.get_or_create('X').get_tensor()
op = Operator(
"uniform_random",
......