s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)

Commit e377d759, authored on Apr 02, 2019 by minqiyang
Parent: 2839e227

    Add UT for most layers without params

    test=develop

Showing 4 changed files with 790 additions and 773 deletions (+790, -773):
paddle/fluid/operators/softmax_with_cross_entropy_op.cu   (+1, -1)
python/paddle/fluid/dygraph/nn.py                         (+11, -11)
python/paddle/fluid/layers/nn.py                          (+39, -1)
python/paddle/fluid/tests/unittests/test_layers.py        (+739, -760)
paddle/fluid/operators/softmax_with_cross_entropy_op.cu

@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
       int batch_size = logits->dims()[0];
       int feature_size = logits->dims()[1];
       auto* logits_data = logits->data<T>();
-      auto* labels_data = labels->data<T>();
+      auto* labels_data = labels->data<int64_t>();
       SoftmaxWithCrossEntropyFusedKernel(
           logits_data, labels_data, softmax_data, loss_data, batch_size,
           feature_size, context.cuda_device_context().stream());
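A note on the one-line fix above: this op consumes hard labels, i.e. integer class ids, so the kernel must read the label tensor as int64_t; reading it through the logits' element type T would reinterpret the label memory as floating point. A minimal NumPy sketch of the op's forward semantics (illustrative only, not the CUDA kernel):

import numpy as np

def softmax_with_cross_entropy(logits, labels):
    # logits: float array [batch_size, feature_size]; labels: int64 ids [batch_size]
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    loss = -np.log(softmax[np.arange(labels.shape[0]), labels])
    return loss, softmax

logits = np.random.rand(4, 10).astype('float32')
labels = np.random.randint(0, 10, size=4).astype('int64')  # int64, as in the fix
loss, softmax = softmax_with_cross_entropy(logits, labels)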
python/paddle/fluid/dygraph/nn.py

@@ -47,7 +47,7 @@ class Conv2D(layers.Layer):
                  bias_attr=None,
                  dtype=core.VarDesc.VarType.FP32):
         assert param_attr is not False, "param_attr should not be False here."
-        super(Conv2D, self).__init__(name_scope)
+        super(Conv2D, self).__init__(name_scope, dtype)
         self._groups = groups
         self._stride = utils.convert_to_list(stride, 2, 'stride')
         self._padding = utils.convert_to_list(padding, 2, 'padding')
@@ -205,7 +205,7 @@ class FC(layers.Layer):
                  num_flatten_dims=1,
                  dtype=core.VarDesc.VarType.FP32,
                  act=None):
-        super(FC, self).__init__(name_scope)
+        super(FC, self).__init__(name_scope, dtype)
         self._size = size
         self._num_flatten_dims = num_flatten_dims
@@ -310,7 +310,7 @@ class BatchNorm(layers.Layer):
                  do_model_average_for_mean_and_var=False,
                  fuse_with_relu=False,
                  use_global_stats=False):
-        super(BatchNorm, self).__init__(name_scope)
+        super(BatchNorm, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._param_attr = bias_attr
         self._act = act
@@ -462,7 +462,7 @@ class Embedding(layers.Layer):
                  param_attr=None,
                  dtype='float32'):
-        super(Embedding, self).__init__(name_scope)
+        super(Embedding, self).__init__(name_scope, dtype)
         self._size = size
         self._is_sparse = is_sparse
         self._is_distributed = is_distributed
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
           >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
         """
-        super(LayerNorm, self).__init__(name_scope)
+        super(LayerNorm, self).__init__(name_scope, dtype)
         self._scale = scale
         self._shift = shift
         self._begin_norm_axis = begin_norm_axis
@@ -710,7 +710,7 @@ class GRUUnit(layers.Layer):
                  gate_activation='sigmoid',
                  origin_mode=False,
                  dtype='float32'):
-        super(GRUUnit, self).__init__(name_scope)
+        super(GRUUnit, self).__init__(name_scope, dtype)
         activation_dict = dict(
             identity=0,
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                  custom_dist=None,
                  seed=0,
                  is_sparse=False):
-        super(NCE, self).__init__(name_scope)
+        super(NCE, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):
     def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope)
+        super(PRelu, self).__init__(name_scope, dtype)
         self._mode = mode
         self._param_attr = param_attr
         if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope)
+        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                  bias_attr=None,
                  use_cudnn=True,
                  act=None):
-        super(Conv2DTranspose, self).__init__(name_scope)
+        super(Conv2DTranspose, self).__init__(name_scope, dtype)
         assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
         self._param_attr = param_attr
         self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
-        super(SequenceConv, self).__init__(name_scope)
+        super(SequenceConv, self).__init__(name_scope, dtype)
         self._num_filters = num_filters
         self._filter_size = filter_size
         self._filter_stride = filter_stride
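Every hunk in this file is the same one-argument fix: the dygraph layers accept a dtype but previously dropped it when delegating to the base Layer constructor. A minimal sketch of the pattern with stand-in classes (not the real fluid.dygraph implementation):

class LayerBase(object):  # stand-in for fluid's Layer base class
    def __init__(self, name_scope, dtype='float32'):
        self._name_scope = name_scope
        self._dtype = dtype  # parameter creation can now honor the requested dtype

class FCLike(LayerBase):
    def __init__(self, name_scope, size, dtype='float32'):
        # Before the fix: super(FCLike, self).__init__(name_scope) silently
        # discarded dtype, so parameters fell back to the base default.
        super(FCLike, self).__init__(name_scope, dtype)
        self._size = size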
python/paddle/fluid/layers/nn.py

@@ -480,6 +480,8 @@ def dynamic_lstm(input,
             forward, _ = fluid.layers.dynamic_lstm(
                 input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstm', **locals())
     size = size // 4
@@ -864,6 +866,9 @@ def dynamic_lstmp(input,
                                      proj_activation="tanh")
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstmp', **locals())
     size = size // 4
@@ -1035,6 +1040,9 @@ def dynamic_gru(input,
         hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
     helper = LayerHelper('gru', **locals())
     dtype = helper.input_dtype()
@@ -1751,6 +1759,8 @@ def sequence_conv(input,
         Variable: output of sequence_conv
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -1810,6 +1820,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
             dtype='float32', lod_level=1)
         x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_softmax', **locals())
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -2302,6 +2314,8 @@ def sequence_pool(input, pool_type, is_test=False):
         last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
         first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -2341,6 +2355,8 @@ def sequence_concat(input, name=None):
         out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -2468,6 +2484,8 @@ def sequence_slice(input, offset, length, name=None):
         subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
                                               length=length)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -3927,6 +3945,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
             dtype='float32', lod_level=1)
         out = layers.sequence_expand(x=x, y=y, ref_level=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -3993,6 +4013,8 @@ def sequence_expand_as(x, y, name=None):
             dtype='float32', lod_level=1)
         out = layers.sequence_expand_as(x=x, y=y)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -4039,6 +4061,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
         out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -4105,6 +4129,8 @@ def sequence_unpad(x, length, name=None):
         out = fluid.layers.sequence_unpad(x=x, length=len)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -5278,6 +5304,8 @@ def sequence_reshape(input, new_dim):
         x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
         x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
     out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
@@ -5812,6 +5840,8 @@ def im2sequence(input,
             input=layer, stride=[1, 1], filter_size=[2, 2])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     if isinstance(filter_size, int):
         filter_size = [filter_size, filter_size]
@@ -6228,7 +6258,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
         },
         outputs={'Diff': diff,
                  'Out': loss},
-        attrs={'sigma': sigma})
+        attrs={'sigma': sigma if sigma is not None else 1.0})
     return loss
@@ -7589,6 +7619,8 @@ def sequence_scatter(input, index, updates, name=None):
         output = fluid.layers.sequence_scatter(input, index, updates)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_scatter', **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -8677,6 +8709,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         x = fluid.layers.data(shape[30, 1], dtype='int32', lod_level=1)
         out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
@@ -8716,6 +8750,8 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
         Variable: The output sequence mask.
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_mask', **locals())
     if name is None:
@@ -9766,6 +9802,8 @@ def sequence_reverse(x, name=None):
     Returns:
         out(${y_type}): ${y_comment}
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
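The additions in this file are all one of two guard idioms: the recurrent layers point the user at their dygraph replacements (lstm/gru), while the sequence layers fail fast because LoD-based ops have no imperative equivalent yet. A self-contained sketch of the second idiom (the module-level flag is a stand-in for the framework's tracer state):

_IN_DYGRAPH = False  # stand-in for the framework's global tracer state

def _in_dygraph_mode():
    return _IN_DYGRAPH

def sequence_layer_like(input, **kwargs):
    # Fail fast with an actionable message instead of silently building
    # static-graph ops that imperative mode cannot run.
    assert not _in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    return input  # the static-graph implementation would continue here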
python/paddle/fluid/tests/unittests/test_layers.py

@@ -18,6 +18,8 @@ import unittest
 import contextlib
 import numpy as np
 import decorators
+import inspect
+from six.moves import filter
 import paddle
 import paddle.fluid as fluid
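The two new imports feed the reflection-based test driver added below: inspect.ismethod picks bound methods out of dir(), and six.moves.filter is lazy on both Python 2 and 3. A small standalone demo of the idiom:

import inspect
from six.moves import filter

class Demo(object):
    def make_a(self):
        return 'a'

    def helper(self):
        return 'b'

d = Demo()
attrs = (getattr(d, name) for name in dir(d))  # dunders, values, and methods
methods = filter(inspect.ismethod, attrs)      # keeps only bound methods
makers = [m for m in methods if m.__name__.startswith('make_')]
assert [m() for m in makers] == ['a']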
@@ -58,8 +60,12 @@ class LayerTest(unittest.TestCase):
         fluid.default_main_program().random_seed = self.seed
         yield

-    def get_static_graph_result(self, feed, fetch_list, with_lod=False):
-        exe = fluid.Executor(self._get_place())
+    def get_static_graph_result(self,
+                                feed,
+                                fetch_list,
+                                with_lod=False,
+                                force_to_use_cpu=False):
+        exe = fluid.Executor(self._get_place(force_to_use_cpu))
         exe.run(fluid.default_startup_program())
         return exe.run(fluid.default_main_program(),
                        feed=feed,
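get_static_graph_result now threads a force_to_use_cpu flag through to place selection, so builders for ops without GPU kernels can opt out of CUDA. A rough sketch of what the flag implies (the real _get_place lives elsewhere in LayerTest; this body is an assumption, not the actual code):

import paddle.fluid as fluid

def _get_place(force_to_use_cpu=False):
    # Fall back to CPU either on request or when the build lacks CUDA.
    if force_to_use_cpu or not fluid.core.is_compiled_with_cuda():
        return fluid.CPUPlace()
    return fluid.CUDAPlace(0)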
@@ -77,7 +83,6 @@ class LayerTest(unittest.TestCase):
 class TestLayer(LayerTest):
     def test_fc(self):
-        # pdb.set_trace()
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():
             t = layers.data(
@@ -596,25 +601,102 @@ class TestLayer(LayerTest):
         self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt))


-class TestBook(unittest.TestCase):
-    def test_fit_a_line(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            x = layers.data(name='x', shape=[13], dtype='float32')
+class TestBook(LayerTest):
+    def test_all_layers(self):
+        attrs = (getattr(self, name) for name in dir(self))
+        methods = filter(inspect.ismethod, attrs)
+        for method in methods:
+            if not method.__name__.startswith('make_'):
+                continue
+            print(method)
+            import sys
+            sys.stdout.flush()
+            self._feed_dict = {}
+            self._force_to_use_cpu = False
+            with self.static_graph():
+                static_var = method()
+                if isinstance(static_var, tuple):
+                    static_var = static_var[0]
+
+            if static_var is not None:
+                fetch_list = [static_var.name]
+                static_result = self.get_static_graph_result(
+                    feed=self._feed_dict,
+                    fetch_list=fetch_list,
+                    force_to_use_cpu=self._force_to_use_cpu)
+            else:
+                assert method.__name__ in ('make_get_places')
+                continue
+
+            with self.dynamic_graph(self._force_to_use_cpu):
+                dy_result = method()
+                if isinstance(dy_result, tuple):
+                    dy_result = dy_result[0]
+
+            self.assertTrue(
+                np.array_equal(static_result[0], dy_result._numpy()))
+
+    def _get_np_data(self, shape, dtype, append_batch_size=True):
+        np.random.seed(self.seed)
+        if append_batch_size:
+            shape = [2] + shape
+        if dtype == 'float32':
+            return np.random.random(shape).astype(dtype)
+        elif dtype == 'float64':
+            return np.random.random(shape).astype(dtype)
+        elif dtype == 'int32':
+            return np.random.randint(0, 2, shape).astype(dtype)
+        elif dtype == 'int64':
+            return np.random.randint(0, 2, shape).astype(dtype)
+
+    def _get_data(self, name, shape, dtype, set_feed_dict=True,
+                  append_batch_size=True):
+        if base.enabled():
+            return base.to_variable(
+                value=self._get_np_data(shape, dtype, append_batch_size),
+                name=name)
+        else:
+            if set_feed_dict:
+                self._feed_dict[name] = self._get_np_data(
+                    shape, dtype, append_batch_size)
+            return layers.data(
+                name=name,
+                shape=shape,
+                dtype=dtype,
+                append_batch_size=append_batch_size)
+
+    def make_sampled_softmax_with_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            logits = self._get_data(
+                name='Logits', shape=[256], dtype='float64')
+            print(logits.dtype)
+            label = self._get_data(name='Label', shape=[1], dtype='int64')
+            num_samples = 25
+            output = layers.sampled_softmax_with_cross_entropy(logits, label,
+                                                               num_samples)
+            return (output)
+
+    def make_fit_a_line(self):
+        with program_guard(
+                fluid.default_main_program(),
+                startup_program=fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[13], dtype='float32')
             y_predict = layers.fc(input=x, size=1, act=None)
-            y = layers.data(name='y', shape=[1], dtype='float32')
+            y = self._get_data(name='y', shape=[1], dtype='float32')
             cost = layers.square_error_cost(input=y_predict, label=y)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
-        print(str(program))
+            return (avg_cost)

-    def test_recognize_digits_mlp(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
+    def make_recognize_digits_mlp(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
             # Change g_program, so the rest layers use `g_program`
-            images = layers.data(name='pixel', shape=[784], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
+            images = self._get_data(
+                name='pixel', shape=[784], dtype='float32')
+            label = self._get_data(name='label', shape=[1], dtype='int64')
             hidden1 = layers.fc(input=images, size=128, act='relu')
             hidden2 = layers.fc(input=hidden1, size=64, act='relu')
             predict = layers.fc(input=[hidden2, hidden1],
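The rewrite above replaces dozens of independent test_* methods with make_* builders plus one driver, test_all_layers, which runs every builder once under the static graph and once under dygraph and compares the fetched outputs. A condensed, standalone version of that control flow (the two calls stand in for the real static-graph and dygraph executions):

import inspect

class SuiteLike(object):
    def make_one(self):
        return 1

    def make_two(self):
        return 2

    def run_all(self):
        for name in dir(self):
            method = getattr(self, name)
            if not (inspect.ismethod(method) and name.startswith('make_')):
                continue
            static_result = method()   # stand-in for the static-graph run
            dynamic_result = method()  # stand-in for the dygraph run
            assert static_result == dynamic_result

SuiteLike().run_all()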
@@ -623,32 +705,21 @@ class TestBook(unittest.TestCase):
                                 param_attr=["sftmax.w1", "sftmax.w2"])
             cost = layers.cross_entropy(input=predict, label=label)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
-        print(str(program))
+            return (avg_cost)

-    def test_simple_conv2d(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            images = layers.data(
-                name='pixel', shape=[3, 48, 48], dtype='float32')
-            layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])
-        print(str(program))
-
-    def test_conv2d_transpose(self):
-        program = Program()
-        with program_guard(program):
-            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
-            layers.conv2d_transpose(input=img, num_filters=10, output_size=28)
-        print(str(program))
+    def make_conv2d_transpose(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            img = self._get_data(
+                name='pixel', shape=[3, 2, 2], dtype='float32')
+            return layers.conv2d_transpose(
+                input=img, num_filters=10, output_size=28)

-    def test_recognize_digits_conv(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            images = layers.data(
-                name='pixel', shape=[1, 28, 28], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
+    def make_recognize_digits_conv(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            images = self._get_data(
+                name='pixel', shape=[1, 28, 28], dtype='float32')
+            label = self._get_data(name='label', shape=[1], dtype='int64')
             conv_pool_1 = nets.simple_img_conv_pool(
                 input=images,
                 filter_size=5,
@@ -667,19 +738,19 @@ class TestBook(unittest.TestCase):
             predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
             cost = layers.cross_entropy(input=predict, label=label)
             avg_cost = layers.mean(cost)
-        print(str(program))
+            return avg_cost

-    def test_word_embedding(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
+    def make_word_embedding(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
             dict_size = 10000
             embed_size = 32
-            first_word = layers.data(name='firstw', shape=[1], dtype='int64')
-            second_word = layers.data(name='secondw', shape=[1], dtype='int64')
-            third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
-            forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
-            next_word = layers.data(name='nextw', shape=[1], dtype='int64')
+            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
+            second_word = self._get_data(
+                name='secondw', shape=[1], dtype='int64')
+            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
+            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
+            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')
             embed_first = layers.embedding(
                 input=first_word,
@@ -713,257 +784,127 @@ class TestBook(unittest.TestCase):
                                      act='softmax')
             cost = layers.cross_entropy(input=predict_word, label=next_word)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
-        print(str(program))
+            return (avg_cost)

-    def test_linear_chain_crf(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            label_dict_len = 10
-            images = layers.data(name='pixel', shape=[784], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
-            hidden = layers.fc(input=images, size=128)
-            crf = layers.linear_chain_crf(
-                input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
-            crf_decode = layers.crf_decoding(
-                input=hidden, param_attr=ParamAttr(name="crfw"))
-            layers.chunk_eval(
-                input=crf_decode,
-                label=label,
-                chunk_scheme="IOB",
-                num_chunk_types=(label_dict_len - 1) // 2)
-            self.assertFalse(crf is None)
-            self.assertFalse(crf_decode is None)
-        print(str(program))
-
-    def test_sigmoid_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            dat = layers.data(name='data', shape=[10], dtype='float32')
-            lbl = layers.data(name='label', shape=[10], dtype='float32')
+    def make_sigmoid_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            dat = self._get_data(name='data', shape=[10], dtype='float32')
+            lbl = self._get_data(name='label', shape=[10], dtype='float32')
             ignore_index = -1
-            self.assertIsNotNone(
+            return (
                 layers.sigmoid_cross_entropy_with_logits(
                     x=dat, label=lbl, ignore_index=ignore_index))
-        print(str(program))

-    def test_hsigmoid(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[2], dtype='float32')
-            y = layers.data(name='y', shape=[2], dtype='int64')
-            self.assertIsNotNone(
-                layers.hsigmoid(input=x, label=y, num_classes=2))
-        print(str(program))
+    def make_hsigmoid(self):
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            x = self._get_data(name='x', shape=[2], dtype='float32')
+            y = self._get_data(name='y', shape=[2], dtype='int64')
+            return (layers.hsigmoid(input=x, label=y, num_classes=2))

         # test hsigmod with custom tree structure
         program2 = Program()
         with program_guard(program2):
-            x2 = layers.data(name='x2', shape=[4, 8], dtype='float32')
-            y2 = layers.data(name='y2', shape=[4], dtype='int64')
-            path_table = layers.data(
-                name='path_table', shape=[4, 6], dtype='int64')
-            path_code = layers.data(
-                name='path_code', shape=[4, 6], dtype='int64')
-            self.assertIsNotNone(
+            x2 = self._get_data(name='x2', shape=[4, 8], dtype='float32')
+            y2 = self._get_data(name='y2', shape=[4], dtype='int64')
+            path_table = self._get_data(
+                name='path_table', shape=[4, 6], dtype='int64')
+            path_code = self._get_data(
+                name='path_code', shape=[4, 6], dtype='int64')
+            return (
                 layers.hsigmoid(
                     input=x2,
                     label=y2,
                     num_classes=6,
                     path_table=path_table,
                     path_code=path_code,
                     is_custom=True))
             print(str(program2))

-    def test_sequence_expand(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[10], dtype='float32')
-            y = layers.data(
-                name='y', shape=[10, 20], dtype='float32', lod_level=2)
-            self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1))
-        print(str(program))
-
-    def test_sequence_unpad(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[10, 5], dtype='float32')
-            length = layers.data(name='length', shape=[1], dtype='int64')
-            self.assertIsNotNone(layers.sequence_unpad(x=x, length=length))
-        print(str(program))
-
-    def test_pool2d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.pool2d(
-                    x,
-                    pool_size=[5, 3],
-                    pool_stride=[1, 2],
-                    pool_padding=(2, 1)))
+    def make_pool2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
+            return (layers.pool2d(
+                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)))

-    def test_adaptive_pool2d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.adaptive_pool2d(
-                    x, [3, 3], pool_type='avg'))
+    def make_adaptive_pool2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
+            return (layers.adaptive_pool2d(x, [3, 3], pool_type='avg'))
             pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
-            self.assertIsNotNone(layers.adaptive_pool2d(x, 3, pool_type='avg'))
+            return (pool)
+            return (mask)
+            return (layers.adaptive_pool2d(x, 3, pool_type='avg'))
             pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
+            return (pool)
+            return (mask)

-    def test_adaptive_pool3d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
-                name='x', shape=[3, 244, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.adaptive_pool3d(
-                    x, [3, 3, 3], pool_type='avg'))
+    def make_adaptive_pool3d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(
+                name='x', shape=[3, 244, 224, 224], dtype='float32')
+            return (layers.adaptive_pool3d(
+                x, [3, 3, 3], pool_type='avg'))
             pool, mask = layers.adaptive_pool3d(
                 x, [3, 3, 3], require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
-            self.assertIsNotNone(layers.adaptive_pool3d(x, 3, pool_type='avg'))
+            return (pool)
+            return (mask)
+            return (layers.adaptive_pool3d(x, 3, pool_type='avg'))
             pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
+            return (pool)
+            return (mask)

-    def test_lstm_unit(self):
-        program = Program()
-        with program_guard(program):
-            x_t_data = layers.data(
+    def make_lstm_unit(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x_t_data = self._get_data(
                 name='x_t_data', shape=[10, 10], dtype='float32')
             x_t = layers.fc(input=x_t_data, size=10)
-            prev_hidden_data = layers.data(
+            prev_hidden_data = self._get_data(
                 name='prev_hidden_data', shape=[10, 30], dtype='float32')
             prev_hidden = layers.fc(input=prev_hidden_data, size=30)
-            prev_cell_data = layers.data(
+            prev_cell_data = self._get_data(
                 name='prev_cell', shape=[10, 30], dtype='float32')
             prev_cell = layers.fc(input=prev_cell_data, size=30)
-            self.assertIsNotNone(
+            return (
                 layers.lstm_unit(
                     x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell))
-        print(str(program))

-    def test_dynamic_lstmp(self):
-        program = Program()
-        with program_guard(program):
-            hidden_dim, proj_dim = 16, 8
-            seq_data = layers.data(
-                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
-            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
-            self.assertIsNotNone(
-                layers.dynamic_lstmp(
-                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
-        print(str(program))
-
-    def test_sequence_softmax(self):
-        program = Program()
-        with program_guard(program):
-            seq_data = layers.data(
-                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
-            seq = layers.fc(input=seq_data, size=20)
-            self.assertIsNotNone(layers.sequence_softmax(seq))
-        print(str(program))
-
-    def test_softmax(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='data', shape=[10], dtype='float32')
+    def make_softmax(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='data', shape=[10], dtype='float32')
             hid = layers.fc(input=data, size=20)
-            self.assertIsNotNone(layers.softmax(hid, axis=1))
-        print(str(program))
+            return (layers.softmax(hid, axis=1))

-    def test_space_to_depth(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(
+    def make_space_to_depth(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(
                 name='data',
                 shape=[32, 9, 6, 6],
                 append_batch_size=False,
                 dtype='float32')
-            self.assertIsNotNone(layers.space_to_depth(data, 3))
-        print(str(program))
+            return (layers.space_to_depth(data, 3))

-    def test_sequence_unsqueeze(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8, 2], dtype='float32')
-            out = layers.unsqueeze(input=x, axes=[1])
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_squeeze(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
-            out = layers.squeeze(input=x, axes=[2])
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_lrn(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='data', shape=[6, 2, 2], dtype='float32')
-            self.assertIsNotNone(layers.lrn(data))
-        print(str(program))
-
-    def test_get_places(self):
-        program = Program()
-        with program_guard(program):
-            x = get_places(device_count=4)
-            self.assertIsNotNone(x)
-        print(str(program))
-
-    def test_sequence_reshape(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
-            out = layers.sequence_reshape(input=x, new_dim=16)
-            self.assertIsNotNone(out)
-        print(str(program))
+    def make_lrn(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(
+                name='data', shape=[6, 2, 2], dtype='float32')
+            return (layers.lrn(data))

-    def test_im2sequence(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
-            y = layers.data(name='y', shape=[], dtype='float32')
-            output = layers.im2sequence(
-                input=x,
-                input_image_size=y,
-                stride=[1, 1],
-                filter_size=[2, 2],
-                out_stride=[1, 1])
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_sampled_softmax_with_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            logits = layers.data(name='Logits', shape=[256], dtype='float64')
-            label = layers.data(name='Label', shape=[1], dtype='int64')
-            num_samples = 25
-            output = layers.sampled_softmax_with_cross_entropy(logits, label,
-                                                               num_samples)
-            self.assertIsNotNone(output)
-        print(str(program))
+    def make_get_places(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            get_places(device_count=1)

     @decorators.prog_scope()
-    def test_nce(self):
+    def make_nce(self):
         window_size = 5
         words = []
         for i in range(window_size):
             words.append(
-                layers.data(
+                self._get_data(
                     name='word_{0}'.format(i), shape=[1], dtype='int64'))

         dict_size = 10000
@@ -989,278 +930,171 @@ class TestBook(unittest.TestCase):
...
@@ -989,278 +930,171 @@ class TestBook(unittest.TestCase):
param_attr
=
'nce.w'
,
param_attr
=
'nce.w'
,
bias_attr
=
'nce.b'
)
bias_attr
=
'nce.b'
)
avg_loss
=
layers
.
mean
(
loss
)
avg_loss
=
layers
.
mean
(
loss
)
self
.
assertIsNotNone
(
avg_loss
)
return
(
avg_loss
)
print
(
str
(
default_main_program
()))
print
(
str
(
default_main_program
()))
def
test_row_conv
(
self
):
def
make_multiplex
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
16
],
dtype
=
'float32'
,
lod_level
=
1
)
x1
=
self
.
_get_data
(
name
=
'x1'
,
shape
=
[
4
],
dtype
=
'float32'
)
out
=
layers
.
row_conv
(
input
=
x
,
future_context_size
=
2
)
x2
=
self
.
_get_data
(
name
=
'x2'
,
shape
=
[
4
],
dtype
=
'float32'
)
self
.
assertIsNotNone
(
out
)
index
=
self
.
_get_data
(
name
=
'index'
,
shape
=
[
1
],
dtype
=
'int32'
)
print
(
str
(
program
))
def
test_multiplex
(
self
):
program
=
Program
()
with
program_guard
(
program
):
x1
=
layers
.
data
(
name
=
'x1'
,
shape
=
[
4
],
dtype
=
'float32'
)
x2
=
layers
.
data
(
name
=
'x2'
,
shape
=
[
4
],
dtype
=
'float32'
)
index
=
layers
.
data
(
name
=
'index'
,
shape
=
[
1
],
dtype
=
'int32'
)
out
=
layers
.
multiplex
(
inputs
=
[
x1
,
x2
],
index
=
index
)
out
=
layers
.
multiplex
(
inputs
=
[
x1
,
x2
],
index
=
index
)
self
.
assertIsNotNone
(
out
)
return
(
out
)
print
(
str
(
program
))
def
make_softmax_with_cross_entropy
(
self
):
def
test_softmax_with_cross_entropy
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
program
=
Program
()
fluid
.
default_startup_program
()):
with
program_guard
(
program
):
x
=
self
.
_get_data
(
name
=
'x'
,
shape
=
[
16
],
dtype
=
'float32'
)
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
16
],
dtype
=
'float32'
)
y
=
self
.
_get_data
(
name
=
'label'
,
shape
=
[
1
],
dtype
=
'int64'
)
y
=
layers
.
data
(
name
=
'label'
,
shape
=
[
1
],
dtype
=
'int64'
)
loss
,
softmax
=
layers
.
softmax_with_cross_entropy
(
loss
,
softmax
=
layers
.
softmax_with_cross_entropy
(
x
,
y
,
return_softmax
=
True
)
x
,
y
,
return_softmax
=
True
)
self
.
assertIsNotNone
(
loss
)
return
(
loss
)
self
.
assertIsNotNone
(
softmax
)
return
(
softmax
)
loss
=
layers
.
softmax_with_cross_entropy
(
x
,
y
)
loss
=
layers
.
softmax_with_cross_entropy
(
x
,
y
)
self
.
assertIsNotNone
(
loss
)
return
(
loss
)
print
(
str
(
program
))
def
make_smooth_l1
(
self
):
def
test_smooth_l1
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
program
=
Program
()
fluid
.
default_startup_program
()):
with
program_guard
(
program
):
x
=
self
.
_get_data
(
name
=
'x'
,
shape
=
[
4
],
dtype
=
'float32'
)
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
4
],
dtype
=
'float32'
)
y
=
self
.
_get_data
(
name
=
'label'
,
shape
=
[
4
],
dtype
=
'float32'
)
y
=
layers
.
data
(
name
=
'label'
,
shape
=
[
4
],
dtype
=
'float32'
)
loss
=
layers
.
smooth_l1
(
x
,
y
)
loss
=
layers
.
smooth_l1
(
x
,
y
)
self
.
assertIsNotNone
(
loss
)
return
(
loss
)
print
(
str
(
program
))
def
test
_scatter
(
self
):
def
make
_scatter
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
x
=
layers
.
data
(
x
=
self
.
_get_
data
(
name
=
'x'
,
name
=
'x'
,
shape
=
[
3
,
3
],
shape
=
[
3
,
3
],
append_batch_size
=
False
,
append_batch_size
=
False
,
dtype
=
'float32'
)
dtype
=
'float32'
)
idx
=
layers
.
data
(
idx
=
self
.
_get_
data
(
name
=
'idx'
,
shape
=
[
2
],
append_batch_size
=
False
,
dtype
=
'int32'
)
name
=
'idx'
,
shape
=
[
2
],
append_batch_size
=
False
,
dtype
=
'int32'
)
updates
=
layers
.
data
(
updates
=
self
.
_get_
data
(
name
=
'updates'
,
name
=
'updates'
,
shape
=
[
2
,
3
],
shape
=
[
2
,
3
],
append_batch_size
=
False
,
append_batch_size
=
False
,
dtype
=
'float32'
)
dtype
=
'float32'
)
out
=
layers
.
scatter
(
input
=
x
,
index
=
idx
,
updates
=
updates
)
out
=
layers
.
scatter
(
input
=
x
,
index
=
idx
,
updates
=
updates
)
self
.
assertIsNotNone
(
out
)
return
(
out
)
print
(
str
(
program
))
def
test_sequence_scatter
(
self
):
program
=
Program
()
with
program_guard
(
program
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
3
,
6
],
append_batch_size
=
False
,
dtype
=
'float32'
)
idx
=
layers
.
data
(
name
=
'idx'
,
shape
=
[
12
,
1
],
append_batch_size
=
False
,
dtype
=
'int32'
,
lod_level
=
1
)
updates
=
layers
.
data
(
name
=
'updates'
,
shape
=
[
12
,
1
],
append_batch_size
=
False
,
dtype
=
'float32'
,
lod_level
=
1
)
out
=
layers
.
sequence_scatter
(
input
=
x
,
index
=
idx
,
updates
=
updates
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
def
test_sequence_slice
(
self
):
def
make_label_smooth
(
self
):
program
=
Program
()
# TODO(minqiyang): support gpu ut
with
program_guard
(
program
):
self
.
_force_to_use_cpu
=
True
import
numpy
as
np
with
fluid
.
framework
.
_dygraph_place_guard
(
place
=
fluid
.
CPUPlace
()):
seqs
=
layers
.
data
(
label
=
self
.
_get_data
(
name
=
"label"
,
shape
=
[
1
],
dtype
=
"int32"
)
name
=
'x'
,
shape
=
[
10
,
5
],
dtype
=
'float32'
,
lod_level
=
1
)
offset
=
layers
.
assign
(
input
=
np
.
array
([[
0
,
1
]]).
astype
(
'int32'
))
length
=
layers
.
assign
(
input
=
np
.
array
([[
2
,
1
]]).
astype
(
'int32'
))
out
=
layers
.
sequence_slice
(
input
=
seqs
,
offset
=
offset
,
length
=
length
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
def
test_lod_reset
(
self
):
program
=
Program
()
with
program_guard
(
program
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
10
],
dtype
=
'float32'
)
y
=
layers
.
data
(
name
=
'y'
,
shape
=
[
10
,
20
],
dtype
=
'float32'
,
lod_level
=
2
)
print
(
layers
.
lod_reset
(
x
=
x
,
y
=
y
))
print
(
str
(
program
))
def
test_label_smooth
(
self
):
program
=
Program
()
with
program_guard
(
program
):
label
=
layers
.
data
(
name
=
"label"
,
shape
=
[
1
],
dtype
=
"float32"
)
one_hot_label
=
layers
.
one_hot
(
input
=
label
,
depth
=
10
)
one_hot_label
=
layers
.
one_hot
(
input
=
label
,
depth
=
10
)
smooth_label
=
layers
.
label_smooth
(
smooth_label
=
layers
.
label_smooth
(
label
=
one_hot_label
,
epsilon
=
0.1
,
dtype
=
"float32"
)
label
=
one_hot_label
,
epsilon
=
0.1
,
dtype
=
"int32"
)
self
.
assertIsNotNone
(
smooth_label
)
return
(
smooth_label
)
print
(
str
(
program
))
def
test_topk
(
self
):
program
=
Program
()
with
program_guard
(
program
):
data
=
layers
.
data
(
name
=
"label"
,
shape
=
[
200
],
dtype
=
"float32"
)
values
,
indices
=
layers
.
topk
(
data
,
k
=
5
)
self
.
assertIsNotNone
(
values
)
self
.
assertIsNotNone
(
indices
)
print
(
str
(
program
))
def
test_roi_pool
(
self
):
program
=
Program
()
with
program_guard
(
program
):
x
=
layers
.
data
(
name
=
"x"
,
shape
=
[
256
,
30
,
30
],
dtype
=
"float32"
)
rois
=
layers
.
data
(
name
=
"rois"
,
shape
=
[
4
],
dtype
=
"float32"
,
lod_level
=
1
)
output
=
layers
.
roi_pool
(
x
,
rois
,
7
,
7
,
0.6
)
self
.
assertIsNotNone
(
output
)
print
(
str
(
program
))
def
test_psroi_pool
(
self
):
def
make_topk
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()):
x
=
layers
.
data
(
name
=
"x"
,
shape
=
[
245
,
30
,
30
],
dtype
=
"float32"
)
data
=
self
.
_get_data
(
name
=
"label"
,
shape
=
[
200
],
dtype
=
"float32"
)
rois
=
layers
.
data
(
values
,
indices
=
layers
.
topk
(
data
,
k
=
5
)
name
=
"rois"
,
shape
=
[
4
],
dtype
=
"float32"
,
lod_level
=
1
)
return
(
values
)
output
=
layers
.
psroi_pool
(
x
,
rois
,
5
,
0.25
,
7
,
7
)
return
(
indices
)
self
.
assertIsNotNone
(
output
)
print
(
str
(
program
))
def
test_roi_align
(
self
):
program
=
Program
()
with
program_guard
(
program
):
x
=
layers
.
data
(
name
=
"x"
,
shape
=
[
256
,
30
,
30
],
dtype
=
"float32"
)
rois
=
layers
.
data
(
name
=
"rois"
,
shape
=
[
4
],
dtype
=
"float32"
,
lod_level
=
1
)
output
=
layers
.
roi_align
(
x
,
rois
,
14
,
14
,
0.5
,
2
)
self
.
assertIsNotNone
(
output
)
print
(
str
(
program
))
def
test
_resize_bilinear
(
self
):
def
make
_resize_bilinear
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
3
,
9
,
6
],
dtype
=
"float32"
)
x
=
self
.
_get_
data
(
name
=
'x'
,
shape
=
[
3
,
9
,
6
],
dtype
=
"float32"
)
output
=
layers
.
resize_bilinear
(
x
,
out_shape
=
[
12
,
12
])
output
=
layers
.
resize_bilinear
(
x
,
out_shape
=
[
12
,
12
])
self
.
assertIsNotNone
(
output
)
return
(
output
)
output
=
layers
.
resize_bilinear
(
x
,
scale
=
3
)
output
=
layers
.
resize_bilinear
(
x
,
scale
=
3
)
self
.
assertIsNotNone
(
output
)
return
(
output
)
print
(
str
(
program
))
def
test
_resize_nearest
(
self
):
def
make
_resize_nearest
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
3
,
9
,
6
],
dtype
=
"float32"
)
x
=
self
.
_get_
data
(
name
=
'x'
,
shape
=
[
3
,
9
,
6
],
dtype
=
"float32"
)
output
=
layers
.
resize_nearest
(
x
,
out_shape
=
[
12
,
12
])
output
=
layers
.
resize_nearest
(
x
,
out_shape
=
[
12
,
12
])
self
.
assertIsNotNone
(
output
)
return
(
output
)
output
=
layers
.
resize_nearest
(
x
,
scale
=
3
)
output
=
layers
.
resize_nearest
(
x
,
scale
=
3
)
self
.
assertIsNotNone
(
output
)
return
(
output
)
print
(
str
(
program
))
def
test
_polygon_box_transform
(
self
):
def
make
_polygon_box_transform
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
8
,
4
,
4
],
dtype
=
"float32"
)
x
=
self
.
_get_
data
(
name
=
'x'
,
shape
=
[
8
,
4
,
4
],
dtype
=
"float32"
)
output
=
layers
.
polygon_box_transform
(
input
=
x
)
output
=
layers
.
polygon_box_transform
(
input
=
x
)
self
.
assertIsNotNone
(
output
)
return
(
output
)
print
(
str
(
program
))
def
test
_l2_normalize
(
self
):
def
make
_l2_normalize
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
8
,
7
,
10
],
dtype
=
"float32"
)
x
=
self
.
_get_
data
(
name
=
'x'
,
shape
=
[
8
,
7
,
10
],
dtype
=
"float32"
)
output
=
layers
.
l2_normalize
(
x
,
axis
=
1
)
output
=
layers
.
l2_normalize
(
x
,
axis
=
1
)
return
output
def
test
_maxout
(
self
):
def
make
_maxout
(
self
):
program
=
Program
()
with
program_guard
(
fluid
.
default_main_program
(),
with
program_guard
(
program
):
fluid
.
default_startup_program
()
):
data
=
layers
.
data
(
name
=
'x'
,
shape
=
[
8
,
6
,
6
],
dtype
=
"float32"
)
data
=
self
.
_get_
data
(
name
=
'x'
,
shape
=
[
8
,
6
,
6
],
dtype
=
"float32"
)
output
=
layers
.
maxout
(
x
=
data
,
groups
=
2
)
output
=
layers
.
maxout
(
x
=
data
,
groups
=
2
)
self
.
assertIsNotNone
(
output
)
return
(
output
)
print
(
str
(
program
))
def
make_crop
(
self
):
def
test_crop
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
program
=
Program
()
fluid
.
default_startup_program
()):
with
program_guard
(
program
):
x
=
self
.
_get_data
(
name
=
'x'
,
shape
=
[
3
,
5
],
dtype
=
"float32"
)
x
=
layers
.
data
(
name
=
'x'
,
shape
=
[
3
,
5
],
dtype
=
"float32"
)
y
=
self
.
_get_data
(
name
=
'y'
,
shape
=
[
2
,
3
],
dtype
=
"float32"
)
y
=
layers
.
data
(
name
=
'y'
,
shape
=
[
2
,
3
],
dtype
=
"float32"
)
output
=
layers
.
crop
(
x
,
shape
=
y
)
output
=
layers
.
crop
(
x
,
shape
=
y
)
self
.
assertIsNotNone
(
output
)
return
(
output
)
print
(
str
(
program
))
-    def test_mean_iou(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[16], dtype='float32')
-            y = layers.data(name='label', shape=[1], dtype='int64')
+    def make_mean_iou(self):
+        # TODO(minqiyang): support gpu ut
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            x = self._get_data(name='x', shape=[16], dtype='int32')
+            y = self._get_data(name='label', shape=[1], dtype='int32')
             iou = layers.mean_iou(x, y, 2)
-            self.assertIsNotNone(iou)
-        print(str(program))
+            return(iou)
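make_mean_iou pins itself to the CPU because, per the TODO, the GPU unit test is not supported yet. A self-contained static-graph sketch of the same check run explicitly on CPUPlace; the triple return value follows the fluid 1.x mean_iou API (mean IOU plus per-class wrong/correct counters) and is an assumption, not part of this hunk:

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        pred = layers.data(name='pred', shape=[16], dtype='int32')
        label = layers.data(name='label', shape=[16], dtype='int32')
        # fluid 1.x returns (mean_iou, out_wrong, out_correct)
        miou, wrong, correct = layers.mean_iou(pred, label, 2)

    exe = fluid.Executor(fluid.CPUPlace())  # GPU kernels are the open TODO
    exe.run(startup)
    feed = {
        'pred': np.random.randint(0, 2, (4, 16)).astype('int32'),
        'label': np.random.randint(0, 2, (4, 16)).astype('int32'),
    }
    result, = exe.run(main, feed=feed, fetch_list=[miou])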
-    def test_argsort(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='x', shape=[2, 3, 3], dtype="float32")
+    def make_argsort(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32")
             out, ids = layers.argsort(input=data, axis=1)
-            self.assertIsNotNone(out)
-            self.assertIsNotNone(ids)
-        print(str(program))
+            return(out)
+            return(ids)
-    def test_rank_loss(self):
-        program = Program()
-        with program_guard(program):
-            label = layers.data(
+    def make_rank_loss(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            label = self._get_data(
                 name='label',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
-            left = layers.data(
+            left = self._get_data(
                 name='left',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
-            right = layers.data(
+            right = self._get_data(
                 name='right',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
             out = layers.rank_loss(label, left, right, name="rank_loss")
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
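The three rank_loss inputs are declared with append_batch_size=False, so feed shapes must match the declared [16, 1] exactly. A runnable sketch under that assumption (feed values illustrative):

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        kwargs = dict(shape=[16, 1], append_batch_size=False, dtype="float32")
        label = layers.data(name='label', **kwargs)
        left = layers.data(name='left', **kwargs)
        right = layers.data(name='right', **kwargs)
        loss = layers.rank_loss(label, left, right, name="rank_loss")

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup)
    feed = {n: np.random.rand(16, 1).astype('float32')
            for n in ('label', 'left', 'right')}
    out, = exe.run(main, feed=feed, fetch_list=[loss])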
-    def test_flatten(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
-                name='x',
-                append_batch_size=False,
-                shape=[4, 4, 3],
-                dtype="float32")
-            out = layers.flatten(x, axis=1, name="flatten")
-            self.assertIsNotNone(out)
-
-    def test_shape(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_shape(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[3, 100, 100], dtype="float32")
             out = layers.shape(input)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
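Unlike the compile-time input.shape attribute, layers.shape evaluates at run time and therefore sees the actual batch size. A short sketch (feed value illustrative):

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        inp = layers.data(name="input", shape=[3, 100, 100], dtype="float32")
        shp = layers.shape(inp)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup)
    out, = exe.run(main,
                   feed={"input": np.zeros((2, 3, 100, 100), 'float32')},
                   fetch_list=[shp])
    # out is an int32 vector holding the runtime shape: [2, 3, 100, 100]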
-    def test_pad2d(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_pad2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[3, 100, 100], dtype="float32")
             paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
             out = layers.pad2d(
@@ -1275,14 +1109,13 @@ class TestBook(unittest.TestCase):
                 mode='reflect',
                 data_format='NCHW',
                 name="shape")
-            self.assertIsNotNone(out)
-            self.assertIsNotNone(out_1)
-        print(str(program))
+            return(out)
+            return(out_1)
-    def test_prelu(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_prelu(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[5, 200, 100, 100], dtype="float32")
             mode = 'channel'
             out = layers.prelu(
@@ -1290,291 +1123,365 @@ class TestBook(unittest.TestCase):
                 mode,
                 param_attr=ParamAttr(initializer=Constant(1.0)),
                 name='prelu')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_brelu(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_brelu(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.brelu(input, t_min=1.0, t_max=20.0, name='brelu')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_leaky_relu(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_leaky_relu(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.leaky_relu(input, alpha=0.1, name='leaky_relu')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_soft_relu(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_soft_relu(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_sigmoid(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_sigmoid(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.sigmoid(input, name='sigmoid')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_logsigmoid(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_logsigmoid(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.logsigmoid(input, name='logsigmoid')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_exp(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_exp(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.exp(input, name='exp')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_tanh(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_tanh(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.tanh(input, name='tanh')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_tanh_shrink(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_tanh_shrink(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.tanh_shrink(input, name='tanh_shrink')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_sqrt(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_sqrt(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.sqrt(input, name='sqrt')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_abs(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_abs(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.abs(input, name='abs')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_ceil(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_ceil(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.ceil(input, name='ceil')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_floor(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_floor(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.floor(input, name='floor')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_cos(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_cos(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.cos(input, name='cos')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_sin(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_sin(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.sin(input, name='sin')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_round(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_round(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.round(input, name='round')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_reciprocal(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_reciprocal(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.reciprocal(input, name='reciprocal')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_square(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_square(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.square(input, name='square')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_softplus(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_softplus(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.softplus(input, name='softplus')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)

-    def test_softsign(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_softsign(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.softsign(input, name='softsign')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
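All nineteen activation hunks above are the same mechanical rewrite, so any one of them can be smoke-tested end to end the same way. A sketch for tanh, checked against numpy (names illustrative):

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        x = layers.data(name="input", shape=[16], dtype="float32")
        out = layers.tanh(x, name='tanh')

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup)
    val = np.random.rand(4, 16).astype('float32')
    res, = exe.run(main, feed={"input": val}, fetch_list=[out])
    assert np.allclose(res, np.tanh(val), atol=1e-6)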
-    def test_roi_perspective_transform(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
-            rois = layers.data(
-                name="rois", shape=[8], dtype="float32", lod_level=1)
-            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
-            self.assertIsNotNone(output)
-        print(str(program))
-    def test_sequence_enumerate(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
-                name="input", shape=[1], dtype='int32', lod_level=1)
-            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
-        print(str(program))
-
-    def test_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[30, 10], dtype="float32")
-            label = layers.data(name="label", shape=[30, 1], dtype="int32")
+    def make_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
+            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
             mode = 'channel'
             out = layers.cross_entropy(x, label, False, 4)
-            self.assertIsNotNone(out)
+            return(out)
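The positional call above reads as cross_entropy(input=x, label=label, soft_label=False, ignore_index=4); with hard labels the index tensor must be int64, which is what the dtype change in this hunk enforces. A hedged numpy mirror of the per-row loss (illustrative, ignoring ignore_index):

    import numpy as np

    def hard_cross_entropy(probs, label):
        # probs: [N, C] probabilities, label: [N, 1] int64 class ids
        rows = np.arange(probs.shape[0])
        return -np.log(probs[rows, label.reshape(-1)])[:, np.newaxis]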
-    def test_bpr_loss(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[30, 10], dtype="float32")
-            label = layers.data(name="label", shape=[30, 1], dtype="int32")
+    def make_bpr_loss(self):
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
+            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
             out = layers.bpr_loss(x, label)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_expand(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="input", shape=[10], dtype='int32')
+    def make_expand(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="input", shape=[10], dtype='int32')
             out = layers.expand(x, [1, 2])
-        print(str(program))
+            return out
-    def test_uniform_random_batch_size_like(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+    def make_uniform_random_batch_size_like(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
+                name="input", shape=[13, 11], dtype='float32')
             out = layers.uniform_random_batch_size_like(input, [-1, 11])
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_gaussian_random(self):
-        program = Program()
-        with program_guard(program):
+    def make_gaussian_random(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
             out = layers.gaussian_random(shape=[20, 30])
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_sampling_id(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
+    def make_sampling_id(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(
                 name="X",
                 shape=[13, 11],
                 dtype='float32',
                 append_batch_size=False)
             out = layers.sampling_id(x)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_gaussian_random_batch_size_like(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+    def make_gaussian_random_batch_size_like(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
+                name="input", shape=[13, 11], dtype='float32')
             out = layers.gaussian_random_batch_size_like(
                 input, shape=[-1, 11], mean=1.0, std=2.0)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_sum(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[13, 11], dtype='float32')
+    def make_sum(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
+                name="input", shape=[13, 11], dtype='float32')
             out = layers.sum(input)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_slice(self):
+    def make_slice(self):
         starts = [1, 0, 2]
         ends = [3, 3, 4]
         axes = [0, 1, 2]
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[3, 4, 5, 6], dtype='float32')
             out = layers.slice(input, axes=axes, starts=starts, ends=ends)
+            return out
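For the [3, 4, 5, 6] input above, axes=[0, 1, 2] with starts=[1, 0, 2] and ends=[3, 3, 4] leaves the last axis untouched, so the result has shape [2, 3, 2, 6]. The numpy equivalent of that slice:

    import numpy as np

    x = np.zeros((3, 4, 5, 6), dtype='float32')
    ref = x[1:3, 0:3, 2:4, :]  # axes=[0, 1, 2], starts=[1, 0, 2], ends=[3, 3, 4]
    assert ref.shape == (2, 3, 2, 6)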
-    def test_softshrink(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(name="input", shape=[16], dtype="float32")
+    def make_softshrink(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(name="input", shape=[16], dtype="float32")
             out = layers.softshrink(input, name='softshrink')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
     def iou_similarity(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[16], dtype="float32")
-            y = layers.data(name="y", shape=[16], dtype="float32")
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="x", shape=[16], dtype="float32")
+            y = self._get_data(name="y", shape=[16], dtype="float32")
             out = layers.iou_similarity(x, y, name='iou_similarity')
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
-    def test_grid_sampler(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 5, 7], dtype='float32')
-            grid = layers.data(name='grid', shape=[5, 7, 2], dtype='float32')
+    def make_grid_sampler(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32')
+            grid = self._get_data(
+                name='grid', shape=[5, 7, 2], dtype='float32')
             out = layers.grid_sampler(x, grid)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return(out)
+    def make_bilinear_tensor_product_layer(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='data', shape=[4], dtype="float32")
+            theta = self._get_data(name="theta", shape=[5], dtype="float32")
+            out = layers.bilinear_tensor_product(data, theta, 6)
+            return(out)
+
+    def make_batch_norm(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(
+                name='data', shape=[32, 128, 128], dtype="float32")
+            out = layers.batch_norm(data)
+            return(out)
+
+    def make_range(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            layers.range(0, 10, 2, 'int32')
+            y = layers.range(0.1, 10.0, 0.2, 'float32')
+            return y
+
+    def make_spectral_norm(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            weight = self._get_data(
+                name='weight',
+                shape=[2, 3, 32, 32],
+                dtype="float32",
+                append_batch_size=False)
+            out = layers.spectral_norm(weight, dim=1, power_iters=1)
+            return(out)
+
+    def make_kldiv_loss(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(
+                name='x', shape=[32, 128, 128], dtype="float32")
+            target = self._get_data(
+                name='target', shape=[32, 128, 128], dtype="float32")
+            loss = layers.kldiv_loss(
+                x=x, target=target, reduction='batchmean')
+            return(loss)
+
+    def make_temporal_shift(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
+            out = layers.temporal_shift(x, seg_num=2, shift_ratio=0.2)
+            return(out)
+
+    def make_shuffle_channel(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
+            out = layers.shuffle_channel(x, group=4)
+            return(out)
+
+    def make_fsp(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
+            y = self._get_data(name="Y", shape=[8, 4, 4], dtype="float32")
+            out = layers.fsp_matrix(x, y)
+            return(out)
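make_kldiv_loss exercises the 'batchmean' reduction. Assuming fluid follows the usual KL-divergence convention where x carries log-probabilities (an assumption, not stated in this hunk), the reduction is the pointwise KL term summed and divided by the batch size:

    import numpy as np

    def kldiv_batchmean(x, target):
        # x: log-probabilities, target: probabilities, both shaped [N, ...]
        pointwise = target * (np.log(target) - x)
        return pointwise.sum() / x.shape[0]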
+    def test_dynamic_lstmp(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            hidden_dim, proj_dim = 16, 8
+            seq_data = layers.data(
+                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
+            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
+            self.assertIsNotNone(
+                layers.dynamic_lstmp(
+                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
+
+    def test_linear_chain_crf(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            label_dict_len = 10
+            images = layers.data(name='pixel', shape=[784], dtype='float32')
+            label = layers.data(name='label', shape=[1], dtype='int32')
+            hidden = layers.fc(input=images, size=2)
+            crf = layers.linear_chain_crf(
+                input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
+            crf_decode = layers.crf_decoding(
+                input=hidden, param_attr=ParamAttr(name="crfw"))
+            self.assertFalse(crf is None)
+            self.assertFalse(crf_decode is None)
+            return layers.chunk_eval(
+                input=crf_decode,
+                label=label,
+                chunk_scheme="IOB",
+                num_chunk_types=(label_dict_len - 1) // 2)
+
+    def test_im2sequence(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
+            y = layers.data(name='y', shape=[], dtype='float32')
+            output = layers.im2sequence(
+                input=x,
+                input_image_size=y,
+                stride=[1, 1],
+                filter_size=[2, 2],
+                out_stride=[1, 1])
+            return(output)
+
+    def test_lod_reset(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[10], dtype='float32')
+            y = layers.data(
+                name='y', shape=[10, 20], dtype='float32', lod_level=2)
+            return(layers.lod_reset(x=x, y=y))
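These tests stay static-graph-only because their inputs declare lod_level >= 1, and dygraph mode has no LoD support yet per the TODOs. A hedged sketch of building such a ragged feed for the seq_data input of test_dynamic_lstmp (sequence lengths illustrative):

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    data = np.random.rand(7, 10, 10).astype('float32')  # 7 timesteps total
    # Two sequences of lengths 3 and 4; the lengths form the level-1 LoD.
    seq_feed = fluid.create_lod_tensor(data, [[3, 4]], place)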
     def test_affine_grid(self):
-        program = Program()
-        with program_guard(program):
+        with self.static_graph():
             data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
             out, ids = layers.argsort(input=data, axis=1)
@@ -1586,81 +1493,153 @@ class TestBook(unittest.TestCase):
             self.assertIsNotNone(data_0)
             self.assertIsNotNone(data_1)
-        print(str(program))
-    def test_bilinear_tensor_product_layer(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='data', shape=[4], dtype="float32")
-            theta = layers.data(name="theta", shape=[5], dtype="float32")
-            out = layers.bilinear_tensor_product(data, theta, 6)
-        print(str(program))
+    def test_psroi_pool(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
+            rois = layers.data(
+                name="rois", shape=[4], dtype="float32", lod_level=1)
+            output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7)
+            return(output)
+
+    def test_sequence_expand(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[10], dtype='float32')
+            y = layers.data(
+                name='y', shape=[10, 20], dtype='float32', lod_level=2)
+            return(layers.sequence_expand(x=x, y=y, ref_level=1))
+
+    def test_sequence_reshape(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
+            out = layers.sequence_reshape(input=x, new_dim=16)
+            return(out)
-    def test_batch_norm(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(
-                name='data', shape=[32, 128, 128], dtype="float32")
-            out = layers.batch_norm(data)
-        print(str(program))
+    def test_sequence_unpad(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[10, 5], dtype='float32')
+            length = layers.data(name='length', shape=[1], dtype='int64')
+            return(layers.sequence_unpad(x=x, length=length))
+
+    def test_sequence_softmax(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            seq_data = layers.data(
+                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
+            seq = layers.fc(input=seq_data, size=20)
+            return(layers.sequence_softmax(seq))
-    def test_range(self):
-        program = Program()
-        with program_guard(program):
-            layers.range(0, 10, 2, 'int32')
-            layers.range(0.1, 10.0, 0.2, 'float32')
-        print(str(program))
+    def test_sequence_unsqueeze(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[8, 2], dtype='float32')
+            out = layers.unsqueeze(input=x, axes=[1])
+            return(out)
+    def test_sequence_scatter(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(
+                name='x',
+                shape=[3, 6],
+                append_batch_size=False,
+                dtype='float32')
+            idx = layers.data(
+                name='idx',
+                shape=[12, 1],
+                append_batch_size=False,
+                dtype='int32',
+                lod_level=1)
+            updates = layers.data(
+                name='updates',
+                shape=[12, 1],
+                append_batch_size=False,
+                dtype='float32',
+                lod_level=1)
+            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
+            return(out)
-    def test_spectral_norm(self):
-        program = Program()
-        with program_guard(program):
-            weight = layers.data(
-                name='weight',
-                shape=[2, 3, 32, 32],
-                dtype="float32",
-                append_batch_size=False)
-            out = layers.spectral_norm(weight, dim=1, power_iters=1)
-            self.assertIsNotNone(out)
+    def test_sequence_slice(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            import numpy as np
+            seqs = layers.data(
+                name='x', shape=[10, 5], dtype='float32', lod_level=1)
+            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
+            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
+            out = layers.sequence_slice(
+                input=seqs, offset=offset, length=length)
+            return(out)
-    def test_kldiv_loss(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[32, 128, 128], dtype="float32")
-            target = layers.data(
-                name='target', shape=[32, 128, 128], dtype="float32")
-            loss = layers.kldiv_loss(
-                x=x, target=target, reduction='batchmean')
-            self.assertIsNotNone(loss)
-        print(str(program))
+    def test_roi_pool(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
+            rois = layers.data(
+                name="rois", shape=[4], dtype="float32", lod_level=1)
+            output = layers.roi_pool(x, rois, 7, 7, 0.6)
+            return(output)
-    def test_temporal_shift(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
-            out = layers.temporal_shift(x, seg_num=4, shift_ratio=0.2)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_shuffle_channel(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
-            out = layers.shuffle_channel(x, group=4)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_fsp(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
-            y = layers.data(name="Y", shape=[8, 4, 4], dtype="float32")
-            out = layers.fsp_matrix(x, y)
-            self.assertIsNotNone(out)
-        print(str(program))
+    def test_sequence_enumerate(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(
+                name="input", shape=[1], dtype='int32', lod_level=1)
+            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
+
+    def test_roi_align(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
+            rois = layers.data(
+                name="rois", shape=[4], dtype="float32", lod_level=1)
+            output = layers.roi_align(x, rois, 14, 14, 0.5, 2)
+            return(output)
+
+    def test_roi_perspective_transform(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
+            rois = layers.data(
+                name="rois", shape=[8], dtype="float32", lod_level=1)
+            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
+            return(output)
+
+    def test_row_conv(self):
+        # TODO(minqiyang): dygraph do not support lod now
+        with self.static_graph():
+            x = layers.data(
+                name='x', shape=[16], dtype='float32', lod_level=1)
+            out = layers.row_conv(input=x, future_context_size=2)
+            return(out)
+
+    def test_simple_conv2d(self):
+        # TODO(minqiyang): dygraph do not support layers with param now
+        with self.static_graph():
+            images = layers.data(
+                name='pixel', shape=[3, 48, 48], dtype='float32')
+            return layers.conv2d(
+                input=images, num_filters=3, filter_size=[4, 4])
+
+    def test_squeeze(self):
+        # TODO(minqiyang): dygraph do not support layers with param now
+        with self.static_graph():
+            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
+            out = layers.squeeze(input=x, axes=[2])
+            return(out)
+
+    def test_flatten(self):
+        # TODO(minqiyang): dygraph do not support op without kernel now
+        with self.static_graph():
+            x = layers.data(
+                name='x',
+                append_batch_size=False,
+                shape=[4, 4, 3],
+                dtype="float32")
+            out = layers.flatten(x, axis=1, name="flatten")
+            return(out)
 
 if __name__ == '__main__':
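test_squeeze declares shape=[1, 1, 4], which becomes [-1, 1, 1, 4] once layers.data prepends the batch dimension, so axes=[2] removes the second singleton axis. The numpy equivalent for a batch of 5:

    import numpy as np

    x = np.zeros((5, 1, 1, 4), dtype='float32')  # batch of 5
    out = np.squeeze(x, axis=2)
    assert out.shape == (5, 1, 4)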