Commit 133f1005 authored by minqiyang

Complete the unittest of optimizers

test=develop
Parent 2547f9d1
@@ -97,17 +97,23 @@ class Conv2D(layers.PyLayer):
             persistable=True,
             type=core.VarDesc.VarType.RAW)
-        self._pre_bias = self._helper.create_variable_for_type_inference(
-            dtype=self._dtype)
+        self._bias_param = self._helper.create_parameter(
+            attr=self._helper.bias_attr,
+            shape=[num_filter_channels],
+            dtype=self._dtype,
+            is_bias=True)
 
     def forward(self, input):
+        pre_bias = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype)
         self._helper.append_op(
             type=self._l_type,
             inputs={
                 'Input': input,
                 'Filter': self._filter_param,
             },
-            outputs={"Output": self._pre_bias},
+            outputs={"Output": pre_bias},
             attrs={
                 'strides': self._stride,
                 'paddings': self._padding,
@@ -117,11 +123,17 @@ class Conv2D(layers.PyLayer):
                 'use_mkldnn': False,
             })
 
-        self._pre_act = self._helper.append_bias_op(
-            self._pre_bias, dim_start=1, dim_end=2)
-
-        out = self._helper.append_activation(self._pre_act)
-        return out
+        pre_act = self._helper.create_variable_for_type_inference(
+            dtype=self._dtype)
+        self._helper.append_op(
+            type='elementwise_add',
+            inputs={'X': [pre_bias],
+                    'Y': [self._bias_param]},
+            outputs={'Out': [pre_act]},
+            attrs={'axis': 1})
+
+        return self._helper.append_activation(pre_act)
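The same refactor repeats for Pool2D and FC below: output variables formerly allocated once in __init__ are now created inside forward(), so every call builds its own outputs instead of reusing one, and the conv bias becomes a real parameter applied through an explicit elementwise_add. A minimal sketch of why the per-call allocation matters, in plain Python rather than the Paddle API:

# Illustrative sketch only, plain Python rather than Paddle: a layer that
# allocates its output once in __init__ makes every call alias the same
# object, while allocating inside forward() gives each call a fresh result.
class SharedOutput:
    def __init__(self):
        self._out = {}                # allocated once, shared by all calls

    def forward(self, x):
        self._out['value'] = x + 1    # second call overwrites the first
        return self._out


class FreshOutput:
    def forward(self, x):
        return {'value': x + 1}       # allocated per call


shared = SharedOutput()
first = shared.forward(1)
second = shared.forward(2)
assert first is second and first['value'] == 3   # first result was clobbered

fresh = FreshOutput()
assert fresh.forward(1) is not fresh.forward(2)  # independent outputs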
 
 
 class Pool2D(layers.PyLayer):
@@ -162,14 +174,13 @@ class Pool2D(layers.PyLayer):
         self._exclusive = exclusive
         self._l_type = 'pool2d'
-        self._pool_out = self._helper.create_variable_for_type_inference(
-            self._dtype)
 
     def forward(self, input):
+        pool_out = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type=self._l_type,
             inputs={"X": input},
-            outputs={"Out": self._pool_out},
+            outputs={"Out": pool_out},
             attrs={
                 "pooling_type": self._pool_type,
                 "ksize": self._pool_size,
@@ -181,7 +192,7 @@ class Pool2D(layers.PyLayer):
                 "use_mkldnn": False,
                 "exclusive": self._exclusive,
             })
-        return self._pool_out
+        return pool_out
 
 
 class FC(layers.PyLayer):
@@ -203,8 +214,6 @@ class FC(layers.PyLayer):
             shape=[size_in, size_out],
             dtype=self._dtype,
             is_bias=False)
-        self._tmp = self._helper.create_variable_for_type_inference(self._dtype)
-        self._out = self._helper.create_variable_for_type_inference(self._dtype)
 
     def _build_once(self, input):
         if self._size_in != -1:
@@ -221,19 +230,21 @@ class FC(layers.PyLayer):
             is_bias=False)
 
     def forward(self, input):
+        tmp = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="mul",
             inputs={"X": input,
                     "Y": self._w},
-            outputs={"Out": self._tmp},
+            outputs={"Out": tmp},
             attrs={
                 "x_num_col_dims": self._num_flatten_dims,
                 "y_num_col_dims": 1
             })
+        out = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="sum",
-            inputs={"X": [self._tmp]},
-            outputs={"Out": self._out},
+            inputs={"X": [tmp]},
+            outputs={"Out": out},
             attrs={"use_mkldnn": False})
-        return self._out
+        return out
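For reference, the graph FC.forward assembles reduces to a flattened matrix multiply followed by a sum over a one-element list. A NumPy sketch of the same computation (fc_forward is a hypothetical stand-in, not Paddle's mul/sum kernels):

# NumPy sketch of the ops FC.forward appends: "mul" flattens the leading
# x_num_col_dims axes of X and matrix-multiplies with W; "sum" then reduces
# a list that here contains only that single product.
import numpy as np

def fc_forward(x, w, num_flatten_dims=1):
    lead = int(np.prod(x.shape[:num_flatten_dims]))
    tmp = x.reshape(lead, -1) @ w    # the "mul" op
    return np.sum([tmp], axis=0)     # the "sum" op over one input

x = np.random.rand(4, 3).astype('float32')
w = np.random.rand(3, 5).astype('float32')
assert fc_forward(x, w).shape == (4, 5)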
@@ -19,16 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layers.nn import FC
-
-
-@contextlib.contextmanager
-def new_program_scope():
-    prog = fluid.Program()
-    startup_prog = fluid.Program()
-    scope = fluid.core.Scope()
-    with fluid.scope_guard(scope):
-        with fluid.program_guard(prog, startup_prog):
-            yield
+from test_imperative_base import new_program_scope
 
 
 class MyLayer(fluid.imperative.PyLayer):
......
@@ -15,12 +15,15 @@
 import contextlib
 import unittest
 import numpy as np
+import six
 
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
 from paddle.fluid.imperative.base import to_variable
+from test_imperative_base import new_program_scope
 
 
 class SimpleImgConvPool(fluid.imperative.PyLayer):
@@ -97,21 +100,93 @@ class MNIST(fluid.imperative.PyLayer):
 
 class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
+        seed = 90
+
         with fluid.imperative.guard():
-            mnist = MNIST()
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+
+            mnist = Conv2D(1, 20, 5)
             sgd = SGDOptimizer(learning_rate=1e-3)
+            train_reader = paddle.batch(
+                paddle.dataset.mnist.train(), batch_size=128)
+
+            dy_param_value = {}
+            for param in fluid.default_main_program().global_block(
+            ).all_parameters():
+                dy_param_value[param.name] = param._numpy()
+
+            for batch_id, data in enumerate(train_reader()):
+                if batch_id >= 1:
+                    break
+
+                x_data = np.array(
+                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
+                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    128, 1)
 
-            for i in range(2):
-                x_data = np.random.rand(128, 1, 28, 28).astype('float32')
                 img = to_variable(x_data)
-                y_data = np.random.rand(128, 1).astype('int64')
                 label = to_variable(y_data)
                 label._stop_gradient = True
 
-                predict = mnist(img)
-                out = fluid.layers.cross_entropy(predict, label)
-                out._backward()
-                sgd.minimize(out)
+                cost = mnist(img)
+                loss = fluid.layers.reduce_mean(cost)
+                dy_out = loss._numpy()
+
+                loss._backward()
+                sgd.minimize(loss)
+                dy_filter_param = mnist._filter_param._numpy()
+
+        with new_program_scope():
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+
+            exe = fluid.Executor(fluid.CPUPlace())
+
+            mnist = Conv2D(1, 20, 5)
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            train_reader = paddle.batch(
+                paddle.dataset.mnist.train(), batch_size=128)
+
+            img = fluid.layers.data(
+                name='pixel', shape=[1, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            cost = mnist(img)
+            loss = fluid.layers.reduce_mean(cost)
+            sgd.minimize(loss)
+
+            # initialize params and fetch them
+            static_param_value = {}
+            static_param_name_list = []
+            for param in fluid.default_startup_program().global_block(
+            ).all_parameters():
+                static_param_name_list.append(param.name)
+
+            out = exe.run(fluid.default_startup_program(),
+                          fetch_list=static_param_name_list)
+
+            for i in range(len(static_param_name_list)):
+                static_param_value[static_param_name_list[i]] = out[i]
+
+            for batch_id, data in enumerate(train_reader()):
+                if batch_id >= 1:
+                    break
+
+                x_data = np.array(
+                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
+                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    [128, 1])
+
+                static_out, static_filter_param = exe.run(
+                    fluid.default_main_program(),
+                    feed={"pixel": x_data,
+                          "label": y_data},
+                    fetch_list=[loss.name, mnist._filter_param.name])
+
+        for key, value in six.iteritems(static_param_value):
+            self.assertTrue(np.allclose(value.all(), dy_param_value[key].all()))
+        self.assertTrue(np.allclose(static_out.all(), dy_out.all()))
+        self.assertTrue(
+            np.allclose(static_filter_param.all(), dy_filter_param.all()))
 
 
 if __name__ == '__main__':
......
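The rewritten test is a seed-matched cross-check: one batch of MNIST training runs once under fluid.imperative.guard() and once through the static Executor, and the loss and filter parameter from both paths must agree. A stripped-down sketch of that structure, with hypothetical run_dynamic/run_static helpers standing in for the two blocks:

# Sketch of the cross-check pattern only; plain NumPy stands in for Paddle,
# and run_dynamic/run_static are hypothetical stand-ins for the
# fluid.imperative.guard() and new_program_scope() blocks in the test.
import numpy as np

def run_dynamic(seed, data):
    rng = np.random.RandomState(seed)   # mirrors random_seed = seed
    w = rng.rand(4)                     # "parameter" initialized from the seed
    return float(w @ data), w           # "loss" after one forward pass

def run_static(seed, data):
    rng = np.random.RandomState(seed)   # identical seed => identical init
    w = rng.rand(4)
    return float(w @ data), w

data = np.ones(4)
dy_out, dy_param = run_dynamic(90, data)
static_out, static_param = run_static(90, data)
assert np.allclose(dy_out, static_out)       # losses agree
assert np.allclose(dy_param, static_param)   # parameters agree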