Unverified commit 353244f4, authored by Jiabin Yang, committed by GitHub

test=develop, add FC and test (#16604)

* test=develop, add FC and test

* test=develop, refine code
Parent bd193781
@@ -65,7 +65,7 @@ class LayerObjectHelper(LayerHelperBase):
     def _input(self, inputs_in):
         inputs = self._multiple_input(inputs_in)
         if len(inputs) != 1:
-            raise "{0} layer only takes one input".format(self.layer_type)
+            raise "{0} layer only takes one input in".format(self.layer_type)
         return inputs[0]
 
     def _multiple_param_attr(self, length, param_attr_in=None):
@@ -74,7 +74,8 @@ class LayerObjectHelper(LayerHelperBase):
             param_attr = [param_attr]
         if len(param_attr) != 1 and len(param_attr) != length:
-            raise ValueError("parameter number mismatch")
+            raise ValueError("parameter number mismatch in {}".format(
+                self.name))
         elif len(param_attr) == 1 and length != 1:
             tmp = [None] * length
             for i in six.moves.range(length):
@@ -91,6 +92,10 @@ class LayerObjectHelper(LayerHelperBase):
         Returns input, param_attr
         """
+        param_attr_in = ParamAttr._to_attr(param_attr_in)
+        if isinstance(param_attr_in, bool):
+            raise ValueError('Param_attr should not be False in {}'.format(
+                self.name))
         inputs = inputs_in if (inputs_in is not None) else []
         inputs = self._multiple_input(inputs)
         param_attrs = self._multiple_param_attr(len(inputs), param_attr_in)
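The four added lines normalize `param_attr_in` before it is used: `ParamAttr._to_attr` turns a bare name string into a `ParamAttr`, while an explicit `False` comes back unchanged, which is what the new `isinstance(..., bool)` check rejects with the layer's name. A minimal sketch of that behavior (fluid 1.x API; illustrative, not part of the commit):

    from paddle.fluid.param_attr import ParamAttr

    attr = ParamAttr._to_attr('fc_w')   # a name string becomes ParamAttr(name='fc_w')
    assert isinstance(attr, ParamAttr)

    attr = ParamAttr._to_attr(False)    # False passes through as a bool, so the
    assert isinstance(attr, bool)       # new check can raise before any parameter is created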
@@ -112,8 +117,8 @@ class LayerObjectHelper(LayerHelperBase):
             if dtype is None:
                 dtype = each.dtype
             elif dtype != each.dtype:
-                raise ValueError("Data Type mismatch: %d to %d" %
-                                 (dtype, each.dtype))
+                raise ValueError("Data Type mismatch: %d to %d in %s" %
+                                 (dtype, each.dtype, self.name))
         return dtype
 
     def get_parameter(self, name):
@@ -126,7 +131,8 @@ class LayerObjectHelper(LayerHelperBase):
         """
         param = self.main_program.global_block().var(name)
         if not isinstance(param, Parameter):
-            raise ValueError("no Parameter name %s found" % name)
+            raise ValueError("no Parameter name %s found in %s" %
+                             (name, self.name))
         return param
 
     def append_bias_op(self,
@@ -184,7 +190,8 @@ class LayerObjectHelper(LayerHelperBase):
         if isinstance(act, six.string_types):
             act = {'type': act}
         else:
-            raise TypeError(str(act) + " should be unicode or str")
+            raise TypeError(
+                str(act) + " should be unicode or str in %s ", self.name)
 
         if (use_cudnn is not None) and use_cudnn:
             act['use_cudnn'] = use_cudnn
@@ -211,5 +218,6 @@ class LayerObjectHelper(LayerHelperBase):
         """
         param = param
         if not isinstance(param, cls):
-            raise TypeError("The input {0} parameter of method {1} must be {2}",
-                            param, self.layer_type, cls.__name__)
+            raise TypeError(
+                "The input {0} parameter of method {1} must be {2}, in layer {3}",
+                param, self.layer_type, cls.__name__, self.name)
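Taken together, the hunks above thread `self.name` into every exception `LayerObjectHelper` raises, so a failure now identifies the offending layer, e.g. `ValueError: parameter number mismatch in fc1` instead of the bare message. Two caveats survive this commit: `_input` still executes `raise` on a plain string, which itself fails under Python 3 (`TypeError: exceptions must derive from BaseException`), and the `TypeError(...)` calls in the last two hunks pass the message and its arguments as separate tuple items, so the `{0}`-style placeholders are never actually formatted.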
@@ -20,7 +20,7 @@ import numpy as np
 from .. import core
 from ..layers import utils
 from . import layers
-from ..framework import Variable, OpProtoHolder
+from ..framework import Variable, OpProtoHolder, Parameter
 from ..layers import layer_function_generator
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant, NumpyArrayInitializer
@@ -213,46 +213,69 @@ class FC(layers.Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
+        self.__w = list()
 
-    def _build_once(self, input):
-        input_shape = input.shape
-        param_shape = [
-            reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)
-        ] + [self._size]
-        self._w = self.create_parameter(
-            attr=self._param_attr,
-            shape=param_shape,
-            dtype=self._dtype,
-            is_bias=False)
+    @property
+    def _w(self, i=0):
+        return self.__w[i]
 
-        if self._bias_attr:
-            size = list([self._size])
-            self._b = self.create_parameter(
-                attr=self._bias_attr,
-                shape=size,
-                dtype=self._dtype,
-                is_bias=True)
-        else:
-            self._b = None
+    @_w.setter
+    def _w(self, value, i=0):
+        assert isinstance(value, Parameter)
+        self.__w[i] = value
+
+    def _build_once(self, input):
+        i = 0
+        for inp, param in self._helper.iter_inputs_and_params(input,
+                                                              self._param_attr):
+            input_shape = inp.shape
+
+            param_shape = [
+                reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:],
+                       1)
+            ] + [self._size]
+            self.__w.append(
+                self.add_parameter(
+                    '_w%d' % i,
+                    self.create_parameter(
+                        attr=param,
+                        shape=param_shape,
+                        dtype=self._dtype,
+                        is_bias=False)))
+            i += 1
+
+        size = list([self._size])
+        self._b = self.create_parameter(
+            attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True)
 
     def forward(self, input):
-        tmp = self._helper.create_variable_for_type_inference(self._dtype)
-        self._helper.append_op(
-            type="mul",
-            inputs={"X": input,
-                    "Y": self._w},
-            outputs={"Out": tmp},
-            attrs={
-                "x_num_col_dims": self._num_flatten_dims,
-                "y_num_col_dims": 1
-            })
+        mul_results = list()
+        i = 0
+        for inp, param in self._helper.iter_inputs_and_params(input,
+                                                              self._param_attr):
+            tmp = self._helper.create_variable_for_type_inference(self._dtype)
+            self._helper.append_op(
+                type="mul",
+                inputs={"X": inp,
+                        "Y": self.__w[i]},
+                outputs={"Out": tmp},
+                attrs={
+                    "x_num_col_dims": self._num_flatten_dims,
+                    "y_num_col_dims": 1
+                })
+            i += 1
+            mul_results.append(tmp)
 
-        pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
-        self._helper.append_op(
-            type="sum",
-            inputs={"X": [tmp]},
-            outputs={"Out": pre_bias},
-            attrs={"use_mkldnn": False})
+        if len(mul_results) == 1:
+            pre_bias = mul_results[0]
+        else:
+            pre_bias = self._helper.create_variable_for_type_inference(
+                self._dtype)
+            self._helper.append_op(
+                type="sum",
+                inputs={"X": mul_results},
+                outputs={"Out": pre_bias},
+                attrs={"use_mkldnn": False})
 
         if self._b:
             pre_activation = self._helper.create_variable_for_type_inference(
...
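For orientation, a minimal dygraph usage sketch of the reworked FC, mirroring the unit test below (a sketch against the 1.4-era imperative API; the `fluid.imperative.guard()` entry point and import paths are assumed from that era, the layer names and `_numpy()` accessor come from the test):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative import nn, base

    inp = np.ones([3, 32, 32], dtype='float32')
    with fluid.imperative.guard():
        t = base.to_variable(inp)
        fc1 = nn.FC('fc1', size=4, bias_attr=False, num_flatten_dims=1)
        fc2 = nn.FC('fc2', size=4)
        out = fc2(fc1(t))             # fc1 flattens [32, 32] -> 1024, so out has shape (3, 4)
        print(out._numpy().shape)

Because `_build_once` and `forward` now loop over `iter_inputs_and_params`, FC can also take a list of inputs: it registers one weight `_w%d` per input and sums the per-input `mul` results before the bias, while the single-input path skips the `sum` op entirely.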
@@ -76,6 +76,41 @@ class LayerTest(unittest.TestCase):
 
 
 class TestLayer(LayerTest):
+    def test_fc(self):
+        # pdb.set_trace()
+        inp = np.ones([3, 32, 32], dtype='float32')
+        with self.static_graph():
+            t = layers.data(
+                name='data',
+                shape=[3, 32, 32],
+                dtype='float32',
+                append_batch_size=False)
+            ret = layers.fc(t, size=4, bias_attr=False, num_flatten_dims=1)
+            ret2 = layers.fc(ret, size=4)
+            static_ret = self.get_static_graph_result(
+                feed={'data': inp}, fetch_list=[ret2])[0]
+        with self.static_graph():
+            t = layers.data(
+                name='data',
+                shape=[3, 32, 32],
+                dtype='float32',
+                append_batch_size=False)
+            fc1 = nn.FC('fc1', size=4, bias_attr=False, num_flatten_dims=1)
+            fc2 = nn.FC('fc2', size=4)
+            ret = fc1(t)
+            ret2 = fc2(ret)
+            static_ret2 = self.get_static_graph_result(
+                feed={'data': inp}, fetch_list=[ret2])[0]
+        with self.dynamic_graph():
+            t = base.to_variable(inp)
+            fc1 = nn.FC('fc1', size=4, bias_attr=False, num_flatten_dims=1)
+            fc2 = nn.FC('fc2', size=4)
+            ret = fc1(t)
+            dy_ret = fc2(ret)
+
+        self.assertTrue(np.array_equal(static_ret, static_ret2))
+        self.assertTrue(np.array_equal(static_ret, dy_ret._numpy()))
+
     def test_layer_norm(self):
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():
...
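As a quick sanity check on the shapes this test exercises, the `param_shape` formula from `_build_once` can be evaluated by hand (a sketch, not part of the test):

    from functools import reduce

    size, num_flatten_dims = 4, 1
    input_shape = (3, 32, 32)
    param_shape = [reduce(lambda a, b: a * b,
                          input_shape[num_flatten_dims:], 1)] + [size]
    print(param_shape)   # [1024, 4]: fc1 maps (3, 32, 32) -> (3, 4); fc2 then maps (3, 4) -> (3, 4)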