Commit ee22a436 authored by fengjiayi, committed by GitHub

Merge pull request #4684 from reyoung/feature/parameter

Feature/parameter
@@ -22,7 +22,7 @@ Whenever we create a block, we need to set its parent block to the current block
```python
class Program(object):
    def __init__(self):
        self.desc = core.NewProgram()  # a C++ ProgramDesc pointer.
        self.blocks = vector<Block>()
        self.blocks.append(Block(self, -1))  # the global block
        self.current_block = 0  # initialized to the global block
```
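The parent link is maintained when a new block is pushed. A minimal sketch in this document's pseudocode style (the real `Program.create_block` appears later in this diff):

```python
def create_block(self):
    # The new block's parent is whatever block is current at creation time.
    new_block_idx = len(self.blocks)
    self.blocks.append(Block(self, self.current_block))
    self.current_block = new_block_idx  # descend into the new block
    return self.blocks[new_block_idx]
```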
@@ -57,7 +57,7 @@ A [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md)
```python
class Block(object):
    def __init__(self, program, parent_idx):
        self.desc = core.NewBlock(program.desc)
        self.program = program
        self.vars = map<string, Variable>()
        self.ops = vector<Operator>()
```
@@ -98,11 +98,11 @@ class Operator(object):
```python
                 outputs,  # dict<string, Variable>
                 attrs     # dict<string, Any>
                 ):
        self.desc = core.NewOpDesc(block.desc, type, inputs, outputs, attrs)
        core.infer_shape(self.desc, inputs, outputs)

    def type(self):
        return self.desc.type()
```
`Operator` creates the `OpDesc` message in C++ space, so that it can call the `InferShape` function, which is in C++.
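Running `InferShape` eagerly means output shapes are available as soon as an operator is constructed. A sketch in the same pseudocode style (`x`, `w`, and the `"mul"` op type here are hypothetical):

```python
out = block.create_var()  # shape left unset on purpose
op = Operator(block, type="mul",
              inputs={"X": [x], "Y": [w]},
              outputs={"Out": [out]},
              attrs={})
# core.infer_shape has already filled in out's shape on the C++ side.
```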
@@ -124,7 +124,7 @@ class Variable(object):
```python
            name = unique_name_generator()
        self.name = name
        self.block = block
        self.desc = core.NewVarDesc(block.desc, name, shape, lod_level)
        self.writer = None
```
@@ -32,5 +32,13 @@ std::vector<int64_t> VarDescBind::Shape() const {
DataType VarDescBind::GetDataType() const {
  return desc_.lod_tensor().data_type();
}

void VarDescBind::SetLoDLevel(int32_t lod_level) {
  desc_.mutable_lod_tensor()->set_lod_level(lod_level);
}

int32_t VarDescBind::GetLodLevel() const {
  return desc_.lod_tensor().lod_level();
}
}  // namespace framework
}  // namespace paddle
@@ -66,6 +66,10 @@ class VarDescBind {
  DataType GetDataType() const;

  void SetLoDLevel(int32_t lod_level);
  int32_t GetLodLevel() const;

 private:
  VarDesc desc_;
};
@@ -166,7 +166,9 @@ void BindVarDsec(py::module &m) {
.def("set_shape", &VarDescBind::SetShape) .def("set_shape", &VarDescBind::SetShape)
.def("set_data_type", &VarDescBind::SetDataType) .def("set_data_type", &VarDescBind::SetDataType)
.def("shape", &VarDescBind::Shape, py::return_value_policy::reference) .def("shape", &VarDescBind::Shape, py::return_value_policy::reference)
.def("data_type", &VarDescBind::GetDataType); .def("data_type", &VarDescBind::GetDataType)
.def("lod_level", &VarDescBind::GetLodLevel)
.def("set_lod_level", &VarDescBind::SetLoDLevel);
} }
void BindOpDesc(py::module &m) { void BindOpDesc(py::module &m) {
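A quick round-trip through the new bindings, as they are exercised from `graph.py` below (a sketch; `block_desc` is assumed to be a `core` BlockDesc handle):

```python
import paddle.v2.framework.core as core

var_desc = block_desc.new_var("x")          # create a fresh VarDesc
var_desc.set_shape([784, 100])
var_desc.set_data_type(core.DataType.FP32)
var_desc.set_lod_level(1)                   # newly bound setter
assert var_desc.lod_level() == 1            # newly bound getter
```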
import paddle.v2.framework.core as core
import collections
import numpy as np
import copy

__all__ = ['Block', 'Variable', 'Program', 'Operator']


class Variable(object):
    def __init__(self,
                 block,
                 name=None,
                 shape=None,
                 dtype=None,
                 lod_level=None,
                 **kwargs):
        self.block = block

        if name is None:
            name = Variable._unique_var_name_()
        try:
            self.desc = self.block.desc.var(name)
            is_new_var = False
        except core.EnforceNotMet:
            self.desc = self.block.desc.new_var(name)
            is_new_var = True

        if shape is not None:
            if is_new_var:
                self.desc.set_shape(shape)
            else:
                old_shape = self.shape
                shape = tuple(shape)
                if shape != old_shape:
                    raise ValueError(
                        "Variable {0} has been created before. The previous "
                        "shape is {1}; the new shape is {2}. They are not "
                        "matched.".format(self.name, old_shape, shape))
        if dtype is not None:
            if not isinstance(dtype, core.DataType):
                dtype = Variable._convert_np_dtype_to_dtype_(dtype)
            if is_new_var:
                self.desc.set_data_type(dtype)
            else:
                old_dtype = self.data_type
                if dtype != old_dtype:
                    raise ValueError("Variable {0} has been created before. "
                                     "The previous data type is {1}; the new "
                                     "data type is {2}. They are not "
                                     "matched.".format(self.name, old_dtype,
                                                       dtype))

        if lod_level is not None:
            if is_new_var:
                self.desc.set_lod_level(lod_level)
            else:
                if lod_level != self.lod_level:
                    raise ValueError("Variable {0} has been created before. "
                                     "The previous lod_level is {1}; the new "
                                     "lod_level is {2}. They are not "
                                     "matched".format(self.name, self.lod_level,
                                                      lod_level))
        self.block.vars[name] = self
        self.op = None
    @property
    def name(self):
        return self.desc.name()

    @property
    def shape(self):
        # convert to tuple, make it as same as numpy API.
        return tuple(self.desc.shape())

    @property
    def data_type(self):
        return self.desc.data_type()

    @property
    def lod_level(self):
        return self.desc.lod_level()

    @staticmethod
    def _unique_var_name_():
        uid = core.unique_integer()  # unique during whole process.
        return "_generated_var_%d" % uid
    @staticmethod
    def _convert_np_dtype_to_dtype_(np_dtype):
        dtype = np.dtype(np_dtype)
        if dtype == np.float32:
            return core.DataType.FP32
        elif dtype == np.float64:
            return core.DataType.FP64
        elif dtype == np.float16:
            return core.DataType.FP16
        elif dtype == np.int32:
            return core.DataType.INT32
        elif dtype == np.int16:
            return core.DataType.INT16
        elif dtype == np.int64:
            return core.DataType.INT64
        elif dtype == np.bool:
            return core.DataType.BOOL
        else:
            raise ValueError("Not supported numpy dtype " + str(dtype))
class Operator(object):
    def __init__(self,
                 block,
                 desc,
                 type=None,
                 inputs=None,
                 outputs=None,
                 attrs=None):
        self.block = block
        self.desc = desc
        if type is not None:
            # TODO.
            pass
@@ -63,31 +134,35 @@ class Operator(object):
class Block(object):
    def __init__(self, program, idx):
        self.desc = program.desc.block(idx)
        self.vars = dict()  # var_name --> var
        self.ops = collections.deque()  # operator list
        self.program = program

    @property
    def parent_idx(self):
        return self.desc.parent

    @property
    def idx(self):
        return self.desc.id

    def create_var(self, *args, **kwargs):
        return Variable(self, *args, **kwargs)

    def create_parameter(self, *args, **kwargs):
        global_block = self.program.global_block()
        return Parameter(global_block, *args, **kwargs)

    def append_op(self, *args, **kwargs):
        op_desc = self.desc.append_op()
        op = Operator(self, op_desc, *args, **kwargs)
        self.ops.append(op)
        return op

    def prepend_op(self, *args, **kwargs):
        op_desc = self.desc.prepend_op()
        op = Operator(self, op_desc, *args, **kwargs)
        self.ops.appendleft(op)
        return op
@@ -104,7 +179,7 @@ class Program(object):
    def __init__(self):
        assert not hasattr(self.__class__,
                           '_instance'), 'Do not call constructor directly!'
        self.desc = core.ProgramDesc.instance()
        self.blocks = [Block(self, 0)]
        self.current_block_idx = 0
@@ -116,7 +191,7 @@
    def create_block(self):
        new_block_idx = len(self.blocks)
        self.desc.append_block(self.current_block().desc)
        self.current_block_idx = new_block_idx
        self.blocks.append(Block(self, self.current_block_idx))
        return self.current_block()
@@ -125,5 +200,41 @@
        self.current_block_idx = self.current_block().parent_idx
class Parameter(Variable):
    def __init__(self, block, shape, dtype, **kwargs):
        if shape is None or dtype is None:
            raise ValueError("Parameter must set shape and dtype")
        if len(shape) == 0:
            raise ValueError("Parameter shape cannot be empty")
        for each in shape:
            if each < 0:
                raise ValueError("Parameter shape should not be related with "
                                 "batch-size")
        Variable.__init__(self, block, shape=shape, dtype=dtype, **kwargs)
        self.trainable = kwargs.get('trainable', True)
        self.init_attr = kwargs.get('initialize_attr', {
            'type': 'uniform_random',
            'min': -1.0,
            'max': 1.0
        })
        self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
        self._append_initialize_ops_()

    def _append_initialize_ops_(self):
        attr = copy.deepcopy(self.init_attr)
        op_type = attr.pop('type', None)
        block = self.block
        assert isinstance(block, Block)
        shape = self.shape
        attr['dims'] = shape
        attr['data_type'] = int(self.data_type)
        op = block.prepend_op(
            type=op_type, inputs=None, outputs={'Out': [self]}, attrs=attr)
        self.op = op
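A sketch of the resulting behavior, assuming the definitions above: constructing a `Parameter` prepends its initializer op to the global block, so the op runs before anything that reads the parameter.

```python
block = g_program.global_block()
w = block.create_parameter(name='w', shape=[784, 100], dtype='float32')
# _append_initialize_ops_ ran in the constructor: a uniform_random op
# (the default init_attr) now sits at the front of block.ops, and w.op
# points at it.
assert w.op is block.ops[0]
```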
# program is a global instance.
g_program = Program.instance()
import unittest
from paddle.v2.framework.graph import g_program
import paddle.v2.framework.core as core


class TestParameter(unittest.TestCase):
    def test_param(self):
        b = g_program.create_block()
        param = b.create_parameter(
            name='fc.w',
            shape=[784, 100],
            dtype='float32',
            initialize_attr={
                'type': 'uniform_random',
                'seed': 13,
                'min': -5.0,
                'max': 5.0
            })
        self.assertIsNotNone(param)
        self.assertEqual('fc.w', param.name)
        self.assertEqual((784, 100), param.shape)
        self.assertEqual(core.DataType.FP32, param.data_type)
        self.assertEqual(0, param.block.idx)


if __name__ == '__main__':
    unittest.main()
import unittest
from paddle.v2.framework.graph import Variable, g_program
import paddle.v2.framework.core as core
import numpy as np


class TestVariable(unittest.TestCase):
    def test_np_dtype_convert(self):
        DT = core.DataType
        convert = Variable._convert_np_dtype_to_dtype_
        self.assertEqual(DT.FP32, convert(np.float32))
        self.assertEqual(DT.FP16, convert("float16"))
        self.assertEqual(DT.FP64, convert("float64"))
        self.assertEqual(DT.INT32, convert("int32"))
        self.assertEqual(DT.INT16, convert("int16"))
        self.assertEqual(DT.INT64, convert("int64"))
        self.assertEqual(DT.BOOL, convert("bool"))
        self.assertRaises(ValueError, lambda: convert("int8"))

    def test_var(self):
        b = g_program.current_block()
        w = b.create_var(
            dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
        self.assertEqual(core.DataType.FP64, w.data_type)
        self.assertEqual((784, 100), w.shape)
        self.assertEqual("fc.w", w.name)
        self.assertEqual(0, w.lod_level)
        w = b.create_var(name='fc.w')
        self.assertEqual(core.DataType.FP64, w.data_type)
        self.assertEqual((784, 100), w.shape)
        self.assertEqual("fc.w", w.name)
        self.assertEqual(0, w.lod_level)
        self.assertRaises(ValueError,
                          lambda: b.create_var(name="fc.w", shape=(24, 100)))


if __name__ == '__main__':
    unittest.main()