Commit e377d759 authored by minqiyang

Add UT for most layers without params

test=develop
Parent 2839e227
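
The unit tests themselves are not shown in this excerpt; as a rough sketch, a test for one of the "layers without params" in dygraph mode looks something like the following (the guard and to_variable names are assumptions taken from later fluid releases, not from this diff):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():                       # assumed guard API
        x = fluid.dygraph.to_variable(np.ones([2, 3], dtype='float32'))
        y = fluid.layers.relu(x)                      # relu creates no parameters
        assert list(y.shape) == [2, 3]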
@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
       int batch_size = logits->dims()[0];
       int feature_size = logits->dims()[1];
       auto* logits_data = logits->data<T>();
-      auto* labels_data = labels->data<T>();
+      auto* labels_data = labels->data<int64_t>();
       SoftmaxWithCrossEntropyFusedKernel(
           logits_data, labels_data, softmax_data, loss_data, batch_size,
           feature_size, context.cuda_device_context().stream());
......
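
The kernel change above reflects that, with hard labels, softmax_with_cross_entropy takes int64 class indices rather than probabilities of type T. A small Python-side sketch of that contract (tensor names are illustrative):

    import paddle.fluid as fluid

    logits = fluid.layers.data(name='logits', shape=[10], dtype='float32')
    # Hard labels are int64 class indices, which the fused CUDA kernel now
    # reads as int64_t instead of the logits dtype T.
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    loss = fluid.layers.softmax_with_cross_entropy(logits=logits, label=label)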
@@ -47,7 +47,7 @@ class Conv2D(layers.Layer):
                  bias_attr=None,
                  dtype=core.VarDesc.VarType.FP32):
         assert param_attr is not False, "param_attr should not be False here."
-        super(Conv2D, self).__init__(name_scope)
+        super(Conv2D, self).__init__(name_scope, dtype)
         self._groups = groups
         self._stride = utils.convert_to_list(stride, 2, 'stride')
         self._padding = utils.convert_to_list(padding, 2, 'padding')
@@ -205,7 +205,7 @@ class FC(layers.Layer):
                  num_flatten_dims=1,
                  dtype=core.VarDesc.VarType.FP32,
                  act=None):
-        super(FC, self).__init__(name_scope)
+        super(FC, self).__init__(name_scope, dtype)
         self._size = size
         self._num_flatten_dims = num_flatten_dims
@@ -310,7 +310,7 @@ class BatchNorm(layers.Layer):
                  do_model_average_for_mean_and_var=False,
                  fuse_with_relu=False,
                  use_global_stats=False):
-        super(BatchNorm, self).__init__(name_scope)
+        super(BatchNorm, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._param_attr = bias_attr
         self._act = act
@@ -462,7 +462,7 @@ class Embedding(layers.Layer):
                  param_attr=None,
                  dtype='float32'):
-        super(Embedding, self).__init__(name_scope)
+        super(Embedding, self).__init__(name_scope, dtype)
         self._size = size
         self._is_sparse = is_sparse
         self._is_distributed = is_distributed
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
             >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
         """
-        super(LayerNorm, self).__init__(name_scope)
+        super(LayerNorm, self).__init__(name_scope, dtype)
         self._scale = scale
         self._shift = shift
         self._begin_norm_axis = begin_norm_axis
@@ -710,7 +710,7 @@ class GRUUnit(layers.Layer):
                  gate_activation='sigmoid',
                  origin_mode=False,
                  dtype='float32'):
-        super(GRUUnit, self).__init__(name_scope)
+        super(GRUUnit, self).__init__(name_scope, dtype)
         activation_dict = dict(
             identity=0,
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                  custom_dist=None,
                  seed=0,
                  is_sparse=False):
-        super(NCE, self).__init__(name_scope)
+        super(NCE, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):
     def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope)
+        super(PRelu, self).__init__(name_scope, dtype)
         self._mode = mode
         self._param_attr = param_attr
         if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope)
+        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                  bias_attr=None,
                  use_cudnn=True,
                  act=None):
-        super(Conv2DTranspose, self).__init__(name_scope)
+        super(Conv2DTranspose, self).__init__(name_scope, dtype)
         assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
         self._param_attr = param_attr
         self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
-        super(SequenceConv, self).__init__(name_scope)
+        super(SequenceConv, self).__init__(name_scope, dtype)
         self._num_filters = num_filters
         self._filter_size = filter_size
         self._filter_stride = filter_stride
......
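
Each hunk above applies the same one-line change: the subclass now forwards its dtype to the base Layer constructor instead of leaving the base class at its default. A condensed sketch of the pattern (MyLayer and the import paths are illustrative assumptions):

    from paddle.fluid import core
    from paddle.fluid.imperative import layers   # assumed module path

    class MyLayer(layers.Layer):
        def __init__(self, name_scope, dtype=core.VarDesc.VarType.FP32):
            # before: super(MyLayer, self).__init__(name_scope)
            # after:  the layer's dtype is handed to the base Layer as well
            super(MyLayer, self).__init__(name_scope, dtype)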
@@ -480,6 +480,8 @@ def dynamic_lstm(input,
         forward, _ = fluid.layers.dynamic_lstm(
             input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstm', **locals())
     size = size // 4
@@ -864,6 +866,9 @@ def dynamic_lstmp(input,
                                  proj_activation="tanh")
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstmp', **locals())
     size = size // 4
@@ -1035,6 +1040,9 @@ def dynamic_gru(input,
         hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
     helper = LayerHelper('gru', **locals())
     dtype = helper.input_dtype()
@@ -1751,6 +1759,8 @@ def sequence_conv(input,
         Variable: output of sequence_conv
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -1810,6 +1820,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
                              dtype='float32', lod_level=1)
         x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_softmax', **locals())
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -2302,6 +2314,8 @@ def sequence_pool(input, pool_type, is_test=False):
         last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
         first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -2341,6 +2355,8 @@ def sequence_concat(input, name=None):
         out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -2468,6 +2484,8 @@ def sequence_slice(input, offset, length, name=None):
         subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
                                               length=length)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -3927,6 +3945,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
                      dtype='float32', lod_level=1)
         out = layers.sequence_expand(x=x, y=y, ref_level=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -3993,6 +4013,8 @@ def sequence_expand_as(x, y, name=None):
                      dtype='float32', lod_level=1)
         out = layers.sequence_expand_as(x=x, y=y)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -4039,6 +4061,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
         out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -4105,6 +4129,8 @@ def sequence_unpad(x, length, name=None):
         out = fluid.layers.sequence_unpad(x=x, length=len)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -5278,6 +5304,8 @@ def sequence_reshape(input, new_dim):
         x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
         x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
     out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
@@ -5812,6 +5840,8 @@ def im2sequence(input,
             input=layer, stride=[1, 1], filter_size=[2, 2])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     if isinstance(filter_size, int):
         filter_size = [filter_size, filter_size]
@@ -6228,7 +6258,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
         },
         outputs={'Diff': diff,
                  'Out': loss},
-        attrs={'sigma': sigma})
+        attrs={'sigma': sigma if sigma is not None else 1.0})
     return loss
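
A brief usage sketch of the smooth_l1 change above (variable names are illustrative): leaving sigma as None now sets the op attribute to 1.0, the same as passing it explicitly.

    import paddle.fluid as fluid

    pred = fluid.layers.data(name='pred', shape=[4], dtype='float32')
    target = fluid.layers.data(name='target', shape=[4], dtype='float32')
    loss_default = fluid.layers.smooth_l1(x=pred, y=target)              # sigma is None -> attribute 1.0
    loss_explicit = fluid.layers.smooth_l1(x=pred, y=target, sigma=1.0)  # equivalent after this change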
@@ -7589,6 +7619,8 @@ def sequence_scatter(input, index, updates, name=None):
         output = fluid.layers.sequence_scatter(input, index, updates)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_scatter', **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -8677,6 +8709,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         x = fluid.layers.data(shape[30, 1], dtype='int32', lod_level=1)
         out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
@@ -8716,6 +8750,8 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
         Variable: The output sequence mask.
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_mask', **locals())
     if name is None:
@@ -9766,6 +9802,8 @@ def sequence_reverse(x, name=None):
     Returns:
         out(${y_type}): ${y_comment}
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
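
The sequence_* hunks above all add the same early guard, so calling a sequence layer under the dygraph guard now fails with a clear assertion instead of erroring inside the op. A hedged illustration of the new failure mode (guard and to_variable names are assumptions from later fluid releases):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.random.rand(3, 20).astype('float32'))
        try:
            fluid.layers.sequence_softmax(input=x)
        except AssertionError as e:
            print(e)  # "sequence layer is not supported in dygraph mode yet."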