Commit e377d759 authored by minqiyang

Add UT for most layers without params

test=develop
Parent 2839e227
@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
    int batch_size = logits->dims()[0];
    int feature_size = logits->dims()[1];
    auto* logits_data = logits->data<T>();
-    auto* labels_data = labels->data<T>();
+    auto* labels_data = labels->data<int64_t>();
    SoftmaxWithCrossEntropyFusedKernel(
        logits_data, labels_data, softmax_data, loss_data, batch_size,
        feature_size, context.cuda_device_context().stream());
......
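The hunk above makes the fused CUDA kernel read hard labels as int64_t rather than T, matching the label dtype the Python API expects. A minimal static-graph sketch (assuming the fluid 1.x `layers.data` and `softmax_with_cross_entropy` APIs) of the label dtype the kernel now relies on:

```python
import paddle.fluid as fluid

# Hard labels are int64, so the kernel's labels->data<int64_t>() read is well defined.
logits = fluid.layers.data(name='logits', shape=[10], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
loss = fluid.layers.softmax_with_cross_entropy(logits=logits, label=label)
```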
@@ -47,7 +47,7 @@ class Conv2D(layers.Layer):
                 bias_attr=None,
                 dtype=core.VarDesc.VarType.FP32):
        assert param_attr is not False, "param_attr should not be False here."
-        super(Conv2D, self).__init__(name_scope)
+        super(Conv2D, self).__init__(name_scope, dtype)
        self._groups = groups
        self._stride = utils.convert_to_list(stride, 2, 'stride')
        self._padding = utils.convert_to_list(padding, 2, 'padding')
@@ -205,7 +205,7 @@ class FC(layers.Layer):
                 num_flatten_dims=1,
                 dtype=core.VarDesc.VarType.FP32,
                 act=None):
-        super(FC, self).__init__(name_scope)
+        super(FC, self).__init__(name_scope, dtype)
        self._size = size
        self._num_flatten_dims = num_flatten_dims
@@ -310,7 +310,7 @@ class BatchNorm(layers.Layer):
                 do_model_average_for_mean_and_var=False,
                 fuse_with_relu=False,
                 use_global_stats=False):
-        super(BatchNorm, self).__init__(name_scope)
+        super(BatchNorm, self).__init__(name_scope, dtype)
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._act = act
@@ -462,7 +462,7 @@ class Embedding(layers.Layer):
                 param_attr=None,
                 dtype='float32'):
-        super(Embedding, self).__init__(name_scope)
+        super(Embedding, self).__init__(name_scope, dtype)
        self._size = size
        self._is_sparse = is_sparse
        self._is_distributed = is_distributed
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
            >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
        """
-        super(LayerNorm, self).__init__(name_scope)
+        super(LayerNorm, self).__init__(name_scope, dtype)
        self._scale = scale
        self._shift = shift
        self._begin_norm_axis = begin_norm_axis
@@ -710,7 +710,7 @@ class GRUUnit(layers.Layer):
                 gate_activation='sigmoid',
                 origin_mode=False,
                 dtype='float32'):
-        super(GRUUnit, self).__init__(name_scope)
+        super(GRUUnit, self).__init__(name_scope, dtype)
        activation_dict = dict(
            identity=0,
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                 custom_dist=None,
                 seed=0,
                 is_sparse=False):
-        super(NCE, self).__init__(name_scope)
+        super(NCE, self).__init__(name_scope, dtype)
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):
    def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope)
+        super(PRelu, self).__init__(name_scope, dtype)
        self._mode = mode
        self._param_attr = param_attr
        if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                 act=None,
                 param_attr=None,
                 bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope)
+        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                 bias_attr=None,
                 use_cudnn=True,
                 act=None):
-        super(Conv2DTranspose, self).__init__(name_scope)
+        super(Conv2DTranspose, self).__init__(name_scope, dtype)
        assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
        self._param_attr = param_attr
        self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                 bias_attr=None,
                 param_attr=None,
                 act=None):
-        super(SequenceConv, self).__init__(name_scope)
+        super(SequenceConv, self).__init__(name_scope, dtype)
        self._num_filters = num_filters
        self._filter_size = filter_size
        self._filter_stride = filter_stride
......
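All of the constructor hunks above forward `dtype` to the `Layer` base class instead of dropping it, so each imperative layer records the dtype its parameters should be created with. A minimal usage sketch; the `fluid.imperative.guard`, `to_variable`, and `nn.FC` names are assumptions based on this 1.x-era tree:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.imperative.guard():
    # FC now hands its dtype (FP32 by default) to Layer.__init__ via the change above.
    fc = fluid.imperative.nn.FC('fc', size=4)
    x = fluid.imperative.to_variable(np.ones([3, 8], dtype='float32'))
    y = fc(x)
```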
@@ -480,6 +480,8 @@ def dynamic_lstm(input,
        forward, _ = fluid.layers.dynamic_lstm(
            input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
    """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!"
    assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
    helper = LayerHelper('lstm', **locals())
    size = size // 4
@@ -864,6 +866,9 @@ def dynamic_lstmp(input,
            proj_activation="tanh")
    """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!"
    assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
    helper = LayerHelper('lstmp', **locals())
    size = size // 4
@@ -1035,6 +1040,9 @@ def dynamic_gru(input,
        hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
    """
+    assert _in_dygraph_mode(
+    ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
    helper = LayerHelper('gru', **locals())
    dtype = helper.input_dtype()
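The three RNN hunks above add fail-fast guards so that the LoD-based `dynamic_lstm`, `dynamic_lstmp`, and `dynamic_gru` entry points refuse to run under dygraph mode and point users at the `lstm`/`gru` layers instead. A hypothetical illustration of what a caller would now see; the `fluid.imperative.guard` entry point is an assumption for this tree, and the input value never matters because the assertion runs first:

```python
import paddle.fluid as fluid

with fluid.imperative.guard():
    try:
        fluid.layers.dynamic_gru(input=None, size=16)
    except AssertionError as e:
        print(e)  # "please use gru instead of dynamic_gru in dygraph mode!"
```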
@@ -1751,6 +1759,8 @@ def sequence_conv(input,
        Variable: output of sequence_conv
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_conv', **locals())
    dtype = helper.input_dtype()
    filter_shape = [filter_size * input.shape[1], num_filters]
@@ -1810,6 +1820,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
                             dtype='float32', lod_level=1)
        x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_softmax', **locals())
    dtype = helper.input_dtype()
    softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -2302,6 +2314,8 @@ def sequence_pool(input, pool_type, is_test=False):
        last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
        first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_pool', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
@@ -2341,6 +2355,8 @@ def sequence_concat(input, name=None):
        out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
@@ -2468,6 +2484,8 @@ def sequence_slice(input, offset, length, name=None):
        subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
                                              length=length)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper("sequence_slice", **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
@@ -3927,6 +3945,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
                             dtype='float32', lod_level=1)
        out = layers.sequence_expand(x=x, y=y, ref_level=0)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_expand', input=x, **locals())
    dtype = helper.input_dtype()
    tmp = helper.create_variable_for_type_inference(dtype)
@@ -3993,6 +4013,8 @@ def sequence_expand_as(x, y, name=None):
                             dtype='float32', lod_level=1)
        out = layers.sequence_expand_as(x=x, y=y)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_expand_as', input=x, **locals())
    dtype = helper.input_dtype()
    tmp = helper.create_variable_for_type_inference(dtype)
@@ -4039,6 +4061,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
        out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_pad', input=x, **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
@@ -4105,6 +4129,8 @@ def sequence_unpad(x, length, name=None):
        out = fluid.layers.sequence_unpad(x=x, length=len)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_unpad', input=x, **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
@@ -5278,6 +5304,8 @@ def sequence_reshape(input, new_dim):
        x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
        x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_reshape', **locals())
    out = helper.create_variable_for_type_inference(helper.input_dtype())
    helper.append_op(
@@ -5812,6 +5840,8 @@ def im2sequence(input,
            input=layer, stride=[1, 1], filter_size=[2, 2])
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
@@ -6228,7 +6258,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
        },
        outputs={'Diff': diff,
                 'Out': loss},
-        attrs={'sigma': sigma})
+        attrs={'sigma': sigma if sigma is not None else 1.0})
    return loss
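The `smooth_l1` hunk replaces a possibly-None attribute with a concrete default of 1.0, since the op attribute expects a float. For reference, a NumPy sketch of the documented piecewise smooth-L1 loss with that default; treat it as an illustrative reference computation, not the op's implementation:

```python
import numpy as np

def smooth_l1_reference(x, y, sigma=None):
    # Mirror the new default: a None sigma means 1.0.
    sigma = 1.0 if sigma is None else sigma
    s2 = sigma * sigma
    diff = x - y
    abs_diff = np.abs(diff)
    # 0.5 * (sigma * d)^2 inside the quadratic zone, |d| - 0.5 / sigma^2 outside.
    loss = np.where(abs_diff < 1.0 / s2, 0.5 * s2 * diff * diff, abs_diff - 0.5 / s2)
    return loss.sum(axis=-1)
```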
@@ -7589,6 +7619,8 @@ def sequence_scatter(input, index, updates, name=None):
        output = fluid.layers.sequence_scatter(input, index, updates)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_scatter', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
@@ -8677,6 +8709,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
        x = fluid.layers.data(shape=[30, 1], dtype='int32', lod_level=1)
        out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_enumerate', **locals())
    out = helper.create_variable_for_type_inference(
        helper.input_dtype(), stop_gradient=True)
@@ -8716,6 +8750,8 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
        Variable: The output sequence mask.
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_mask', **locals())
    if name is None:
@@ -9766,6 +9802,8 @@ def sequence_reverse(x, name=None):
    Returns:
        out(${y_type}): ${y_comment}
    """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper("sequence_reverse", **locals())
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
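The unit tests this commit adds ("Add UT for most layers without params") are elided above. A hypothetical sketch of the kind of dygraph test the title describes; the `fluid.imperative.guard`/`to_variable` helpers and the `_numpy()` accessor are assumptions based on this 1.x-era tree:

```python
import unittest

import numpy as np
import paddle.fluid as fluid


class TestLayersWithoutParams(unittest.TestCase):
    def test_relu(self):
        inp = np.random.random((3, 4)).astype('float32')
        with fluid.imperative.guard():
            x = fluid.imperative.to_variable(inp)
            out = fluid.layers.relu(x)
        # relu has no parameters, so checking the eager output value is enough.
        self.assertTrue(np.allclose(out._numpy(), np.maximum(inp, 0)))


if __name__ == '__main__':
    unittest.main()
```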