Commit 9903e49f authored by QI JUN, committed by fengjiayi

add test_fit_a_line (#4936)

* add test_fit_a_line

* Update

* fix persistable bug

* fix elementwise add bug

* set correct attr for bias op in fc layer

* set correct attr for bias op in fc layer

* Update

1. Add init_program to hold initializers
2. bug fix

* add test_fit_a_line

* fix persistable bug

* fix elementwise add bug

* fix type

* add gitignore

* Complete fit_a_line test

* revert code

* Clean up

* Revert "revert code"

This reverts commit eb1aa015.

* Refine

* Fix unit test
Parent 92369177
@@ -53,10 +53,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(
         ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"),
         "uniform_random's min must less then max");
-    auto& dims = ctx->Attrs().Get<std::vector<int>>("dims");
+    auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
-    temp.reserve(dims.size());
-    for (auto dim : dims) {
+    temp.reserve(shape.size());
+    for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
     }
     ctx->SetOutputDim("Out", framework::make_ddim(temp));
@@ -78,7 +78,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(Uniform random operator.
 Used to initialize tensor with uniform random generator.
 )DOC");
-    AddAttr<std::vector<int>>("dims", "the dimension of random tensor");
+    AddAttr<std::vector<int>>("shape", "the dimension of random tensor");
     AddAttr<float>("min", "Minimum value of uniform random").SetDefault(-1.0f);
     AddAttr<float>("max", "Maximun value of uniform random").SetDefault(1.0f);
     AddAttr<int>("seed",
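Python callers construct the operator with the renamed attribute; the updated unit test at the bottom of this diff does exactly this. A minimal standalone sketch (the `Operator` import path is assumed from the v2 test suite):

from paddle.v2.framework.op import Operator

# After this change the attribute is `shape`; the old `dims` keyword no
# longer matches the registered attribute and would be rejected.
op = Operator(
    "uniform_random", Out='X', shape=[1000, 784], min=-5.0, max=10.0, seed=10)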
@@ -15,7 +15,7 @@ class Variable(object):
                  shape=None,
                  dtype=None,
                  lod_level=None,
-                 persistable=False,
+                 persistable=None,
                  **kwargs):
         self.block = block
@@ -343,6 +343,8 @@ class Block(object):
     def create_parameter(self, *args, **kwargs):
         global_block = self.program.global_block()
         param = Parameter(global_block, *args, **kwargs)
+        if 'init_attr' in kwargs:
+            self._prepend_initialize_ops_(param, kwargs['init_attr'])
         return param

     def append_op(self, *args, **kwargs):
@@ -401,6 +403,17 @@ class Block(object):
         for index in range(len(self.ops)):
             assert self.ops[index].desc == ops_in_cpp[index]

+    def _prepend_initialize_ops_(self, param, init_attr):
+        op_type = init_attr['type']
+        init_attr['shape'] = param.shape
+        init_attr['data_type'] = int(param.data_type)
+        op = self.prepend_op(
+            type=op_type,
+            inputs=None,
+            outputs={'Out': [param]},
+            attrs=init_attr)
+        param.op = op
+

 class Program(object):
     def __init__(self):
@@ -475,27 +488,10 @@ class Parameter(Variable):
         Variable.__init__(
             self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
         self.trainable = kwargs.get('trainable', True)
-        self.init_attr = kwargs.get('initialize_attr', {
-            'type': 'uniform_random',
-            'min': -1.0,
-            'max': 1.0
-        })
         self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
-        self._append_initialize_ops_()
-
-    def _append_initialize_ops_(self):
-        attr = self.init_attr
-        op_type = attr.pop('type', None)
-        block = self.block
-        assert isinstance(block, Block)
-        shape = self.shape
-        attr['dims'] = shape
-        attr['data_type'] = int(self.data_type)
-        op = block.prepend_op(
-            type=op_type, inputs=None, outputs={'Out': [self]}, attrs=attr)
-        self.op = op

 # program is a global instance.
 g_program = Program()
+g_init_program = Program()
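The net effect of the framework.py changes is that initialization moves off the Parameter object and onto the block: `create_parameter` prepends the initializer op to whichever block owns the parameter, and the new `g_init_program` gives those ops a home separate from the training program, so initializers run once up front instead of on every pass. A hedged sketch of the new flow (the parameter name and attr values are illustrative):

from paddle.v2.framework.framework import g_init_program

# A parameter created with an init_attr gets the matching initializer op
# (here uniform_random) prepended to the owning block -- the init
# program's global block -- instead of inside Parameter.__init__.
param = g_init_program.global_block().create_parameter(
    name='fc_0.w',
    shape=[13, 1],
    dtype='float32',
    init_attr={'type': 'uniform_random', 'min': -1.0, 'max': 1.0})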
-from paddle.v2.framework.framework import Variable, OpProtoHolder, g_program
+from paddle.v2.framework.framework import Variable, OpProtoHolder, g_program, g_init_program
 import paddle.v2.framework.core as core
 import copy
 import itertools
@@ -29,6 +29,14 @@ class LayerHelper(object):
         else:
             return prog

+    @property
+    def init_program(self):
+        prog = self.kwargs.get('init_program', None)
+        if prog is None:
+            return g_init_program
+        else:
+            return prog
+
     def append_op(self, *args, **kwargs):
         return self.program.current_block().append_op(*args, **kwargs)
@@ -66,16 +74,14 @@ class LayerHelper(object):
         actual = self.kwargs.get('param_attr', None)
         return actual if actual is not None else default

-    def bias_attr(self, shape, dtype):
+    def bias_attr(self):
         bias_attr = self.kwargs.get('bias_attr', None)
         if bias_attr is True:
             bias_attr = {
                 'name': None,
                 'init_attr': {
                     'type': 'fill_constant',
-                    'value': 0.0,
-                    'shape': shape,
-                    'dataType': dtype
+                    'value': 0.0
                 }
             }
         return bias_attr
@@ -113,22 +119,27 @@ class LayerHelper(object):
     def create_parameter(self, attr, shape, dtype, suffix='w'):
         if attr['name'] is None:
             attr['name'] = unique_name(".".join([self.name, suffix]))
-        return self.program.global_block().create_parameter(
+        self.init_program.global_block().create_parameter(
             name=attr['name'],
             dtype=dtype,
             shape=shape,
-            initialize_attr=attr['init_attr'])
+            init_attr=attr['init_attr'])
+        return self.program.global_block().create_parameter(
+            name=attr['name'], dtype=dtype, shape=shape)

     def create_tmp_variable(self, dtype):
         return self.program.current_block().create_var(
-            name=unique_name(".".join([self.name, 'tmp'])), dtype=dtype)
+            name=unique_name(".".join([self.name, 'tmp'])),
+            dtype=dtype,
+            persistable=False)

     def create_global_variable(self, *args, **kwargs):
-        return self.program.global_block().create_var(*args, **kwargs)
+        return self.program.global_block().create_var(
+            *args, persistable=False, **kwargs)

     def append_bias_op(self, input_var):
         size = list(input_var.shape[1:])
-        bias_attr = self.bias_attr(size, dtype=input_var.data_type)
+        bias_attr = self.bias_attr()
         if not bias_attr:
             return input_var
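`create_parameter` now registers every weight twice: the copy in the init program's global block carries `init_attr` and therefore the prepended initializer op, while the copy in the main program is the one returned and wired into the layer's ops. The two share a name, so at run time they should resolve to the same variable in the executor's scope. A hedged sketch (layer name, shapes, and attr values are illustrative):

from paddle.v2.framework.layer_helper import LayerHelper

helper = LayerHelper(
    'fc', input=x, size=1, program=program, init_program=init_program)
w = helper.create_parameter(
    attr={'name': None,
          'init_attr': {'type': 'uniform_random', 'min': -1.0, 'max': 1.0}},
    shape=[13, 1],
    dtype='float32')
# `w` lives in `program`; its initializer op lives in `init_program`,
# which the executor runs once before training starts.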
@@ -13,7 +13,8 @@ def fc(input,
        name=None,
        act=None,
        num_flatten_dims=1,
-       program=None):
+       program=None,
+       init_program=None):
     # create helper
     helper = LayerHelper('fc', **locals())
@@ -59,7 +60,8 @@ def data(name,
          data_type='float32',
          type=core.VarDesc.VarType.LOD_TENSOR,
          append_batch_size=True,
-         program=None):
+         program=None,
+         init_program=None):
     helper = LayerHelper('data', **locals())
     if append_batch_size:
         shape = [-1] + shape  # append batch size as -1
@@ -160,7 +162,8 @@ def conv2d(input,
            padding=None,
            bias_attr=None,
            param_attr=None,
-           program=None):
+           program=None,
+           init_program=None):
     helper = LayerHelper('conv2d', **locals())
     dtype = helper.input_dtype()
@@ -207,7 +210,8 @@ def pool2d(input,
            pool_stride=[1, 1],
            pool_padding=[0, 0],
            global_pooling=False,
-           program=None):
+           program=None,
+           init_program=None):
     if pool_type not in ["max", "avg"]:
         raise ValueError(
             "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
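Each layer function only grows an `init_program=None` keyword; because the helper is built with `LayerHelper(..., **locals())`, the new argument lands in `helper.kwargs` automatically, where the `init_program` property shown earlier resolves it and falls back to the global `g_init_program`. A sketch of the pattern with a hypothetical layer:

from paddle.v2.framework.layer_helper import LayerHelper

def my_layer(input, program=None, init_program=None):
    # **locals() forwards program and init_program (along with every
    # other argument) into helper.kwargs for the properties to look up.
    helper = LayerHelper('my_layer', **locals())
    return helper.create_tmp_variable(dtype=input.data_type)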
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor
import numpy as np
init_program = Program()
program = Program()
x = layers.data(
    name='x',
    shape=[13],
    data_type='float32',
    program=program,
    init_program=init_program)
y_predict = layers.fc(input=x,
                      size=1,
                      act=None,
                      program=program,
                      init_program=init_program)
y = layers.data(
    name='y',
    shape=[1],
    data_type='float32',
    program=program,
    init_program=init_program)

cost = layers.square_error_cost(
    input=y_predict, label=y, program=program, init_program=init_program)
avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 20
train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.train(), buf_size=500),
    batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(init_program, feed={}, fetch_list=[])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
    for data in train_reader():
        x_data = np.array(map(lambda x: x[0], data)).astype("float32")
        y_data = np.array(map(lambda x: x[1], data)).astype("float32")

        tensor_x = core.LoDTensor()
        tensor_x.set(x_data, place)
        # print tensor_x.get_dims()

        tensor_y = core.LoDTensor()
        tensor_y.set(y_data, place)
        # print tensor_y.get_dims()

        outs = exe.run(program,
                       feed={'x': tensor_x,
                             'y': tensor_y},
                       fetch_list=[avg_cost])
        out = np.array(outs[0])

        if out[0] < 10.0:
            exit(0)  # if avg cost less than 10.0, we think our code is good.
exit(1)
@@ -19,7 +19,7 @@ class TestUniformRandomOp(unittest.TestCase):
         op = Operator(
             "uniform_random",
             Out='X',
-            dims=[1000, 784],
+            shape=[1000, 784],
             min=-5.0,
             max=10.0,
             seed=10)