Commit e377d759
Authored Apr 02, 2019 by minqiyang
Add UT for most layers without params
test=develop
Parent: 2839e227
Showing 4 changed files with 790 additions and 773 deletions (+790 / -773)
paddle/fluid/operators/softmax_with_cross_entropy_op.cu (+1 / -1)
python/paddle/fluid/dygraph/nn.py (+11 / -11)
python/paddle/fluid/layers/nn.py (+39 / -1)
python/paddle/fluid/tests/unittests/test_layers.py (+739 / -760)
paddle/fluid/operators/softmax_with_cross_entropy_op.cu
@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
     int batch_size = logits->dims()[0];
     int feature_size = logits->dims()[1];
     auto* logits_data = logits->data<T>();
-    auto* labels_data = labels->data<T>();
+    auto* labels_data = labels->data<int64_t>();
     SoftmaxWithCrossEntropyFusedKernel(
         logits_data, labels_data, softmax_data, loss_data, batch_size,
         feature_size, context.cuda_device_context().stream());
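The one-line fix above matters because the label tensor holds class indices, which fluid stores as 64-bit integers, so reading it through the logits element type T was wrong whenever T != int64. A minimal Python-side sketch of the contract the kernel now assumes (it mirrors the make_softmax_with_cross_entropy case later in this diff):

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    # Logits are floating point; the label tensor is int64 class indices.
    x = layers.data(name='x', shape=[16], dtype='float32')
    y = layers.data(name='label', shape=[1], dtype='int64')
    loss, softmax = layers.softmax_with_cross_entropy(x, y, return_softmax=True)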
python/paddle/fluid/dygraph/nn.py
@@ -47,7 +47,7 @@ class Conv2D(layers.Layer):
                  bias_attr=None,
                  dtype=core.VarDesc.VarType.FP32):
         assert param_attr is not False, "param_attr should not be False here."
-        super(Conv2D, self).__init__(name_scope)
+        super(Conv2D, self).__init__(name_scope, dtype)
         self._groups = groups
         self._stride = utils.convert_to_list(stride, 2, 'stride')
         self._padding = utils.convert_to_list(padding, 2, 'padding')
@@ -205,7 +205,7 @@ class FC(layers.Layer):
                  num_flatten_dims=1,
                  dtype=core.VarDesc.VarType.FP32,
                  act=None):
-        super(FC, self).__init__(name_scope)
+        super(FC, self).__init__(name_scope, dtype)
         self._size = size
         self._num_flatten_dims = num_flatten_dims
@@ -310,7 +310,7 @@ class BatchNorm(layers.Layer):
                  do_model_average_for_mean_and_var=False,
                  fuse_with_relu=False,
                  use_global_stats=False):
-        super(BatchNorm, self).__init__(name_scope)
+        super(BatchNorm, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._param_attr = bias_attr
         self._act = act
@@ -462,7 +462,7 @@ class Embedding(layers.Layer):
                  param_attr=None,
                  dtype='float32'):
-        super(Embedding, self).__init__(name_scope)
+        super(Embedding, self).__init__(name_scope, dtype)
         self._size = size
         self._is_sparse = is_sparse
         self._is_distributed = is_distributed
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
               >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
         """
-        super(LayerNorm, self).__init__(name_scope)
+        super(LayerNorm, self).__init__(name_scope, dtype)
         self._scale = scale
         self._shift = shift
         self._begin_norm_axis = begin_norm_axis
@@ -710,7 +710,7 @@ class GRUUnit(layers.Layer):
                  gate_activation='sigmoid',
                  origin_mode=False,
                  dtype='float32'):
-        super(GRUUnit, self).__init__(name_scope)
+        super(GRUUnit, self).__init__(name_scope, dtype)
         activation_dict = dict(
             identity=0,
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                  custom_dist=None,
                  seed=0,
                  is_sparse=False):
-        super(NCE, self).__init__(name_scope)
+        super(NCE, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):
     def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope)
+        super(PRelu, self).__init__(name_scope, dtype)
         self._mode = mode
         self._param_attr = param_attr
         if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope)
+        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                  bias_attr=None,
                  use_cudnn=True,
                  act=None):
-        super(Conv2DTranspose, self).__init__(name_scope)
+        super(Conv2DTranspose, self).__init__(name_scope, dtype)
         assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
         self._param_attr = param_attr
         self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
-        super(SequenceConv, self).__init__(name_scope)
+        super(SequenceConv, self).__init__(name_scope, dtype)
         self._num_filters = num_filters
         self._filter_size = filter_size
         self._filter_stride = filter_stride
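Every hunk in this file applies the same mechanical change: the layer's dtype is now forwarded to the base Layer constructor instead of being dropped. A minimal sketch of the pattern; the base-class signature shown here is an assumption inferred from the new call sites, not something this diff displays:

    class Layer(object):
        # Assumed base signature: name_scope plus an optional dtype that
        # parameters created by subclasses will inherit.
        def __init__(self, name_scope, dtype='float32'):
            self._name_scope = name_scope
            self._dtype = dtype

    class Embedding(Layer):
        def __init__(self, name_scope, size, param_attr=None, dtype='float32'):
            # The change above: forward dtype instead of silently dropping it.
            super(Embedding, self).__init__(name_scope, dtype)
            self._size = size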
python/paddle/fluid/layers/nn.py
@@ -480,6 +480,8 @@ def dynamic_lstm(input,
           forward, _ = fluid.layers.dynamic_lstm(
               input=forward_proj, size=hidden_dim * 4, use_peepholes=False)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!"
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstm', **locals())
     size = size // 4
@@ -864,6 +866,9 @@ def dynamic_lstmp(input,
                                      proj_activation="tanh")
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!"
+
     assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp."
     helper = LayerHelper('lstmp', **locals())
     size = size // 4
@@ -1035,6 +1040,9 @@ def dynamic_gru(input,
           hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
+
     helper = LayerHelper('gru', **locals())
     dtype = helper.input_dtype()
@@ -1751,6 +1759,8 @@ def sequence_conv(input,
         Variable: output of sequence_conv
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -1810,6 +1820,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
                                   dtype='float32', lod_level=1)
             x_sequence_softmax = fluid.layers.sequence_softmax(input=x)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_softmax', **locals())
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -2302,6 +2314,8 @@ def sequence_pool(input, pool_type, is_test=False):
              last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
              first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -2341,6 +2355,8 @@ def sequence_concat(input, name=None):
            out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -2468,6 +2484,8 @@ def sequence_slice(input, offset, length, name=None):
              subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
                                                    length=length)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -3927,6 +3945,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
                              dtype='float32', lod_level=1)
             out = layers.sequence_expand(x=x, y=y, ref_level=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -3993,6 +4013,8 @@ def sequence_expand_as(x, y, name=None):
                              dtype='float32', lod_level=1)
             out = layers.sequence_expand_as(x=x, y=y)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -4039,6 +4061,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
             out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -4105,6 +4129,8 @@ def sequence_unpad(x, length, name=None):
             out = fluid.layers.sequence_unpad(x=x, length=len)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -5278,6 +5304,8 @@ def sequence_reshape(input, new_dim):
            x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
            x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
     out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
@@ -5812,6 +5840,8 @@ def im2sequence(input,
                 input=layer, stride=[1, 1], filter_size=[2, 2])
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     if isinstance(filter_size, int):
         filter_size = [filter_size, filter_size]
@@ -6228,7 +6258,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
         },
         outputs={'Diff': diff,
                  'Out': loss},
-        attrs={'sigma': sigma})
+        attrs={'sigma': sigma if sigma is not None else 1.0})
     return loss
@@ -7589,6 +7619,8 @@ def sequence_scatter(input, index, updates, name=None):
             output = fluid.layers.sequence_scatter(input, index, updates)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_scatter', **locals())
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -8677,6 +8709,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
             x = fluid.layers.data(shape[30, 1], dtype='int32', lod_level=1)
             out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
@@ -8716,6 +8750,8 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
         Variable: The output sequence mask.
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_mask', **locals())
     if name is None:
@@ -9766,6 +9802,8 @@ def sequence_reverse(x, name=None):
     Returns:
         out(${y_type}): ${y_comment}
     """
+    assert not _in_dygraph_mode(), (
+        "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
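All of the sequence-layer hunks in this file prepend one identical guard, and the dynamic_lstm/dynamic_lstmp/dynamic_gru hunks do the same with op-specific messages. A minimal sketch of the guard pattern with a hypothetical op name (sequence_some_op is not a real API):

    def sequence_some_op(input, name=None):
        # LoD-based sequence ops have no dygraph kernels yet, so fail fast
        # with a clear message instead of an obscure runtime error.
        assert not _in_dygraph_mode(), (
            "sequence layer is not supported in dygraph mode yet.")
        helper = LayerHelper('sequence_some_op', **locals())
        return helper.create_variable_for_type_inference(helper.input_dtype())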
python/paddle/fluid/tests/unittests/test_layers.py
@@ -18,6 +18,8 @@ import unittest
 import contextlib
 import numpy as np
 import decorators
+import inspect
+from six.moves import filter
 import paddle
 import paddle.fluid as fluid
@@ -58,8 +60,12 @@ class LayerTest(unittest.TestCase):
             fluid.default_main_program().random_seed = self.seed
             yield

-    def get_static_graph_result(self, feed, fetch_list, with_lod=False):
-        exe = fluid.Executor(self._get_place())
+    def get_static_graph_result(self,
+                                feed,
+                                fetch_list,
+                                with_lod=False,
+                                force_to_use_cpu=False):
+        exe = fluid.Executor(self._get_place(force_to_use_cpu))
         exe.run(fluid.default_startup_program())
         return exe.run(fluid.default_main_program(),
                        feed=feed,
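The new force_to_use_cpu flag is threaded through to self._get_place, whose body is not part of this diff. A plausible shape for that helper, stated purely as an assumption:

    def _get_place(self, force_to_use_cpu=False):
        # Assumed helper: prefer the GPU when one is available, unless the
        # test pins itself to CPU (e.g. for ops without GPU kernels).
        if force_to_use_cpu or not core.is_compiled_with_cuda():
            return core.CPUPlace()
        return core.CUDAPlace(0)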
@@ -77,7 +83,6 @@ class TestLayer(LayerTest):
     def test_fc(self):
-        # pdb.set_trace()
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():
             t = layers.data(
@@ -596,25 +601,102 @@ class TestLayer(LayerTest):
         self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt))


-class TestBook(unittest.TestCase):
-    def test_fit_a_line(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            x = layers.data(name='x', shape=[13], dtype='float32')
+class TestBook(LayerTest):
+    def test_all_layers(self):
+        attrs = (getattr(self, name) for name in dir(self))
+        methods = filter(inspect.ismethod, attrs)
+        for method in methods:
+            if not method.__name__.startswith('make_'):
+                continue
+            print(method)
+            import sys
+            sys.stdout.flush()
+            self._feed_dict = {}
+            self._force_to_use_cpu = False
+            with self.static_graph():
+                static_var = method()
+                if isinstance(static_var, tuple):
+                    static_var = static_var[0]
+
+            if static_var is not None:
+                fetch_list = [static_var.name]
+                static_result = self.get_static_graph_result(
+                    feed=self._feed_dict,
+                    fetch_list=fetch_list,
+                    force_to_use_cpu=self._force_to_use_cpu)
+            else:
+                assert method.__name__ in ('make_get_places')
+                continue
+
+            with self.dynamic_graph(self._force_to_use_cpu):
+                dy_result = method()
+                if isinstance(dy_result, tuple):
+                    dy_result = dy_result[0]
+
+            self.assertTrue(
+                np.array_equal(static_result[0], dy_result._numpy()))
+
+    def _get_np_data(self, shape, dtype, append_batch_size=True):
+        np.random.seed(self.seed)
+        if append_batch_size:
+            shape = [2] + shape
+        if dtype == 'float32':
+            return np.random.random(shape).astype(dtype)
+        elif dtype == 'float64':
+            return np.random.random(shape).astype(dtype)
+        elif dtype == 'int32':
+            return np.random.randint(0, 2, shape).astype(dtype)
+        elif dtype == 'int64':
+            return np.random.randint(0, 2, shape).astype(dtype)
+
+    def _get_data(self,
+                  name,
+                  shape,
+                  dtype,
+                  set_feed_dict=True,
+                  append_batch_size=True):
+        if base.enabled():
+            return base.to_variable(
+                value=self._get_np_data(shape, dtype, append_batch_size),
+                name=name)
+        else:
+            if set_feed_dict:
+                self._feed_dict[name] = self._get_np_data(shape, dtype,
+                                                          append_batch_size)
+            return layers.data(
+                name=name,
+                shape=shape,
+                dtype=dtype,
+                append_batch_size=append_batch_size)
+
+    def make_sampled_softmax_with_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            logits = self._get_data(name='Logits', shape=[256], dtype='float64')
+            print(logits.dtype)
+            label = self._get_data(name='Label', shape=[1], dtype='int64')
+            num_samples = 25
+            output = layers.sampled_softmax_with_cross_entropy(logits, label,
+                                                               num_samples)
+            return (output)
+
+    def make_fit_a_line(self):
+        with program_guard(fluid.default_main_program(),
+                           startup_program=fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[13], dtype='float32')
             y_predict = layers.fc(input=x, size=1, act=None)
-            y = layers.data(name='y', shape=[1], dtype='float32')
+            y = self._get_data(name='y', shape=[1], dtype='float32')
             cost = layers.square_error_cost(input=y_predict, label=y)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
-        print(str(program))
+            return (avg_cost)

-    def test_recognize_digits_mlp(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
+    def make_recognize_digits_mlp(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
             # Change g_program, so the rest layers use `g_program`
-            images = layers.data(name='pixel', shape=[784], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
+            images = self._get_data(name='pixel', shape=[784], dtype='float32')
+            label = self._get_data(name='label', shape=[1], dtype='int64')
             hidden1 = layers.fc(input=images, size=128, act='relu')
             hidden2 = layers.fc(input=hidden1, size=64, act='relu')
             predict = layers.fc(input=[hidden2, hidden1],
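The rewritten TestBook discovers its cases by reflection: every method whose name starts with make_ is built under the static graph, executed, rebuilt under dygraph, and the two outputs compared. A standalone sketch of just the discovery loop (simplified from test_all_layers above):

    import inspect

    class CaseRunner(object):
        def run_all_cases(self):
            # Collect bound methods named make_* and invoke each one, the
            # same way test_all_layers enumerates its builders.
            attrs = (getattr(self, name) for name in dir(self))
            for method in filter(inspect.ismethod, attrs):
                if method.__name__.startswith('make_'):
                    method()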
@@ -623,32 +705,21 @@ class TestBook(unittest.TestCase):
                                 param_attr=["sftmax.w1", "sftmax.w2"])
             cost = layers.cross_entropy(input=predict, label=label)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
-        print(str(program))
+            return (avg_cost)

-    def test_simple_conv2d(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            images = layers.data(
-                name='pixel', shape=[3, 48, 48], dtype='float32')
-            layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])
-        print(str(program))
-
-    def test_conv2d_transpose(self):
-        program = Program()
-        with program_guard(program):
-            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
-            layers.conv2d_transpose(input=img, num_filters=10, output_size=28)
-        print(str(program))
+    def make_conv2d_transpose(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32')
+            return layers.conv2d_transpose(
+                input=img, num_filters=10, output_size=28)

-    def test_recognize_digits_conv(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            images = layers.data(
+    def make_recognize_digits_conv(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            images = self._get_data(
                 name='pixel', shape=[1, 28, 28], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
+            label = self._get_data(name='label', shape=[1], dtype='int64')
             conv_pool_1 = nets.simple_img_conv_pool(input=images,
                                                     filter_size=5,
@@ -667,19 +738,19 @@ class TestBook(unittest.TestCase):
             predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
             cost = layers.cross_entropy(input=predict, label=label)
             avg_cost = layers.mean(cost)
-        print(str(program))
+            return avg_cost

-    def test_word_embedding(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
+    def make_word_embedding(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
             dict_size = 10000
             embed_size = 32
-            first_word = layers.data(name='firstw', shape=[1], dtype='int64')
-            second_word = layers.data(name='secondw', shape=[1], dtype='int64')
-            third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
-            forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
-            next_word = layers.data(name='nextw', shape=[1], dtype='int64')
+            first_word = self._get_data(name='firstw', shape=[1], dtype='int64')
+            second_word = self._get_data(name='secondw', shape=[1], dtype='int64')
+            third_word = self._get_data(name='thirdw', shape=[1], dtype='int64')
+            forth_word = self._get_data(name='forthw', shape=[1], dtype='int64')
+            next_word = self._get_data(name='nextw', shape=[1], dtype='int64')
             embed_first = layers.embedding(input=first_word,
@@ -713,257 +784,127 @@ class TestBook(unittest.TestCase):
                                      act='softmax')
             cost = layers.cross_entropy(input=predict_word, label=next_word)
             avg_cost = layers.mean(cost)
-            self.assertIsNotNone(avg_cost)
+            return (avg_cost)

-        print(str(program))
-
-    def test_linear_chain_crf(self):
-        program = Program()
-        with program_guard(program, startup_program=Program()):
-            label_dict_len = 10
-            images = layers.data(name='pixel', shape=[784], dtype='float32')
-            label = layers.data(name='label', shape=[1], dtype='int32')
-            hidden = layers.fc(input=images, size=128)
-            crf = layers.linear_chain_crf(
-                input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
-            crf_decode = layers.crf_decoding(
-                input=hidden, param_attr=ParamAttr(name="crfw"))
-            layers.chunk_eval(
-                input=crf_decode,
-                label=label,
-                chunk_scheme="IOB",
-                num_chunk_types=(label_dict_len - 1) // 2)
-            self.assertFalse(crf is None)
-            self.assertFalse(crf_decode is None)
-        print(str(program))
-
-    def test_sigmoid_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            dat = layers.data(name='data', shape=[10], dtype='float32')
-            lbl = layers.data(name='label', shape=[10], dtype='float32')
+    def make_sigmoid_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            dat = self._get_data(name='data', shape=[10], dtype='float32')
+            lbl = self._get_data(name='label', shape=[10], dtype='float32')
             ignore_index = -1
-            self.assertIsNotNone(
-                layers.sigmoid_cross_entropy_with_logits(
-                    x=dat, label=lbl, ignore_index=ignore_index))
-        print(str(program))
-
-    def test_hsigmoid(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[2], dtype='float32')
-            y = layers.data(name='y', shape=[2], dtype='int64')
-            self.assertIsNotNone(
-                layers.hsigmoid(input=x, label=y, num_classes=2))
-        print(str(program))
+            return (layers.sigmoid_cross_entropy_with_logits(
+                x=dat, label=lbl, ignore_index=ignore_index))
+
+    def make_hsigmoid(self):
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            x = self._get_data(name='x', shape=[2], dtype='float32')
+            y = self._get_data(name='y', shape=[2], dtype='int64')
+            return (layers.hsigmoid(input=x, label=y, num_classes=2))

         # test hsigmod with custom tree structure
         program2 = Program()
         with program_guard(program2):
-            x2 = layers.data(name='x2', shape=[4, 8], dtype='float32')
-            y2 = layers.data(name='y2', shape=[4], dtype='int64')
-            path_table = layers.data(
+            x2 = self._get_data(name='x2', shape=[4, 8], dtype='float32')
+            y2 = self._get_data(name='y2', shape=[4], dtype='int64')
+            path_table = self._get_data(
                 name='path_table', shape=[4, 6], dtype='int64')
-            path_code = layers.data(
+            path_code = self._get_data(
                 name='path_code', shape=[4, 6], dtype='int64')
-            self.assertIsNotNone(
-                layers.hsigmoid(
-                    input=x2,
-                    label=y2,
-                    num_classes=6,
-                    path_table=path_table,
-                    path_code=path_code,
-                    is_custom=True))
-        print(str(program2))
-
-    def test_sequence_expand(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[10], dtype='float32')
-            y = layers.data(
-                name='y', shape=[10, 20], dtype='float32', lod_level=2)
-            self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1))
-        print(str(program))
-
-    def test_sequence_unpad(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[10, 5], dtype='float32')
-            length = layers.data(name='length', shape=[1], dtype='int64')
-            self.assertIsNotNone(layers.sequence_unpad(x=x, length=length))
-        print(str(program))
-
-    def test_pool2d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.pool2d(
-                    x,
-                    pool_size=[5, 3],
-                    pool_stride=[1, 2],
-                    pool_padding=(2, 1)))
-
-    def test_adaptive_pool2d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.adaptive_pool2d(
-                    x, [3, 3], pool_type='avg'))
+            return (layers.hsigmoid(
+                input=x2,
+                label=y2,
+                num_classes=6,
+                path_table=path_table,
+                path_code=path_code,
+                is_custom=True))
+
+    def make_pool2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
+            return (layers.pool2d(
+                x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1)))
+
+    def make_adaptive_pool2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32')
+            return (layers.adaptive_pool2d(x, [3, 3], pool_type='avg'))
             pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
-            self.assertIsNotNone(layers.adaptive_pool2d(x, 3, pool_type='avg'))
+            return (pool)
+            return (mask)
+            return (layers.adaptive_pool2d(x, 3, pool_type='avg'))
             pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
-
-    def test_adaptive_pool3d(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
-                name='x', shape=[3, 244, 224, 224], dtype='float32')
-            self.assertIsNotNone(
-                layers.adaptive_pool3d(
-                    x, [3, 3, 3], pool_type='avg'))
+            return (pool)
+            return (mask)
+
+    def make_adaptive_pool3d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(
+                name='x', shape=[3, 244, 224, 224], dtype='float32')
+            return (layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg'))
             pool, mask = layers.adaptive_pool3d(
                 x, [3, 3, 3], require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
-            self.assertIsNotNone(layers.adaptive_pool3d(x, 3, pool_type='avg'))
+            return (pool)
+            return (mask)
+            return (layers.adaptive_pool3d(x, 3, pool_type='avg'))
             pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
-            self.assertIsNotNone(pool)
-            self.assertIsNotNone(mask)
+            return (pool)
+            return (mask)

-    def test_lstm_unit(self):
-        program = Program()
-        with program_guard(program):
-            x_t_data = layers.data(
+    def make_lstm_unit(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x_t_data = self._get_data(
                 name='x_t_data', shape=[10, 10], dtype='float32')
             x_t = layers.fc(input=x_t_data, size=10)
-            prev_hidden_data = layers.data(
+            prev_hidden_data = self._get_data(
                 name='prev_hidden_data', shape=[10, 30], dtype='float32')
             prev_hidden = layers.fc(input=prev_hidden_data, size=30)
-            prev_cell_data = layers.data(
+            prev_cell_data = self._get_data(
                 name='prev_cell', shape=[10, 30], dtype='float32')
             prev_cell = layers.fc(input=prev_cell_data, size=30)
-            self.assertIsNotNone(
-                layers.lstm_unit(
-                    x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell))
-        print(str(program))
-
-    def test_dynamic_lstmp(self):
-        program = Program()
-        with program_guard(program):
-            hidden_dim, proj_dim = 16, 8
-            seq_data = layers.data(
-                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
-            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
-            self.assertIsNotNone(
-                layers.dynamic_lstmp(
-                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
-        print(str(program))
-
-    def test_sequence_softmax(self):
-        program = Program()
-        with program_guard(program):
-            seq_data = layers.data(
-                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
-            seq = layers.fc(input=seq_data, size=20)
-            self.assertIsNotNone(layers.sequence_softmax(seq))
-        print(str(program))
-
-    def test_softmax(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='data', shape=[10], dtype='float32')
+            return (layers.lstm_unit(
+                x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell))
+
+    def make_softmax(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='data', shape=[10], dtype='float32')
             hid = layers.fc(input=data, size=20)
-            self.assertIsNotNone(layers.softmax(hid, axis=1))
-        print(str(program))
-
-    def test_space_to_depth(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(
+            return (layers.softmax(hid, axis=1))
+
+    def make_space_to_depth(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(
                 name='data',
                 shape=[32, 9, 6, 6],
                 append_batch_size=False,
                 dtype='float32')
-            self.assertIsNotNone(layers.space_to_depth(data, 3))
-        print(str(program))
-
-    def test_sequence_unsqueeze(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8, 2], dtype='float32')
-            out = layers.unsqueeze(input=x, axes=[1])
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_squeeze(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
-            out = layers.squeeze(input=x, axes=[2])
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_lrn(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='data', shape=[6, 2, 2], dtype='float32')
-            self.assertIsNotNone(layers.lrn(data))
-        print(str(program))
-
-    def test_get_places(self):
-        program = Program()
-        with program_guard(program):
-            x = get_places(device_count=4)
-            self.assertIsNotNone(x)
-        print(str(program))
-
-    def test_sequence_reshape(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
-            out = layers.sequence_reshape(input=x, new_dim=16)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_im2sequence(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
-            y = layers.data(name='y', shape=[], dtype='float32')
-            output = layers.im2sequence(
-                input=x,
-                input_image_size=y,
-                stride=[1, 1],
-                filter_size=[2, 2],
-                out_stride=[1, 1])
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_sampled_softmax_with_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            logits = layers.data(name='Logits', shape=[256], dtype='float64')
-            label = layers.data(name='Label', shape=[1], dtype='int64')
-            num_samples = 25
-            output = layers.sampled_softmax_with_cross_entropy(logits, label,
-                                                               num_samples)
-            self.assertIsNotNone(output)
-        print(str(program))
+            return (layers.space_to_depth(data, 3))
+
+    def make_lrn(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
+            return (layers.lrn(data))
+
+    def make_get_places(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            get_places(device_count=1)

     @decorators.prog_scope()
-    def test_nce(self):
+    def make_nce(self):
         window_size = 5
         words = []
         for i in range(window_size):
             words.append(
-                layers.data(
+                self._get_data(
                     name='word_{0}'.format(i), shape=[1], dtype='int64'))

         dict_size = 10000
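Each make_* builder returns the op's output variable; the harness fetches it by name after running the static program, then reruns the same builder under dygraph and compares elementwise. One builder from the hunk above, shown with the (abridged) harness steps that consume it:

    def make_lrn(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
            return (layers.lrn(data))

    # Harness side, abridged from test_all_layers:
    #   static_result = self.get_static_graph_result(
    #       feed=self._feed_dict, fetch_list=[static_var.name],
    #       force_to_use_cpu=self._force_to_use_cpu)
    #   self.assertTrue(np.array_equal(static_result[0], dy_result._numpy()))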
@@ -989,278 +930,171 @@ class TestBook(unittest.TestCase):
                          param_attr='nce.w',
                          bias_attr='nce.b')
         avg_loss = layers.mean(loss)
-        self.assertIsNotNone(avg_loss)
+        return (avg_loss)

-        print(str(default_main_program()))
-
-    def test_row_conv(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
-            out = layers.row_conv(input=x, future_context_size=2)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_multiplex(self):
-        program = Program()
-        with program_guard(program):
-            x1 = layers.data(name='x1', shape=[4], dtype='float32')
-            x2 = layers.data(name='x2', shape=[4], dtype='float32')
-            index = layers.data(name='index', shape=[1], dtype='int32')
+    def make_multiplex(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x1 = self._get_data(name='x1', shape=[4], dtype='float32')
+            x2 = self._get_data(name='x2', shape=[4], dtype='float32')
+            index = self._get_data(name='index', shape=[1], dtype='int32')
             out = layers.multiplex(inputs=[x1, x2], index=index)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_softmax_with_cross_entropy(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[16], dtype='float32')
-            y = layers.data(name='label', shape=[1], dtype='int64')
+            return (out)
+
+    def make_softmax_with_cross_entropy(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[16], dtype='float32')
+            y = self._get_data(name='label', shape=[1], dtype='int64')
             loss, softmax = layers.softmax_with_cross_entropy(
                 x, y, return_softmax=True)
-            self.assertIsNotNone(loss)
-            self.assertIsNotNone(softmax)
+            return (loss)
+            return (softmax)
             loss = layers.softmax_with_cross_entropy(x, y)
-            self.assertIsNotNone(loss)
-        print(str(program))
-
-    def test_smooth_l1(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[4], dtype='float32')
-            y = layers.data(name='label', shape=[4], dtype='float32')
+            return (loss)
+
+    def make_smooth_l1(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[4], dtype='float32')
+            y = self._get_data(name='label', shape=[4], dtype='float32')
             loss = layers.smooth_l1(x, y)
-            self.assertIsNotNone(loss)
-        print(str(program))
+            return (loss)

-    def test_scatter(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
+    def make_scatter(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(
                 name='x', shape=[3, 3], append_batch_size=False,
                 dtype='float32')
-            idx = layers.data(
+            idx = self._get_data(
                 name='idx', shape=[2], append_batch_size=False, dtype='int32')
-            updates = layers.data(
+            updates = self._get_data(
                 name='updates',
                 shape=[2, 3],
                 append_batch_size=False,
                 dtype='float32')
             out = layers.scatter(input=x, index=idx, updates=updates)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return (out)

-    def test_sequence_scatter(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(
-                name='x', shape=[3, 6], append_batch_size=False,
-                dtype='float32')
-            idx = layers.data(
-                name='idx',
-                shape=[12, 1],
-                append_batch_size=False,
-                dtype='int32',
-                lod_level=1)
-            updates = layers.data(
-                name='updates',
-                shape=[12, 1],
-                append_batch_size=False,
-                dtype='float32',
-                lod_level=1)
-            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_sequence_slice(self):
-        program = Program()
-        with program_guard(program):
-            import numpy as np
-            seqs = layers.data(
-                name='x', shape=[10, 5], dtype='float32', lod_level=1)
-            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
-            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
-            out = layers.sequence_slice(
-                input=seqs, offset=offset, length=length)
-            self.assertIsNotNone(out)
-        print(str(program))
-
-    def test_lod_reset(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[10], dtype='float32')
-            y = layers.data(
-                name='y', shape=[10, 20], dtype='float32', lod_level=2)
-            print(layers.lod_reset(x=x, y=y))
-        print(str(program))
-
-    def test_label_smooth(self):
-        program = Program()
-        with program_guard(program):
-            label = layers.data(name="label", shape=[1], dtype="float32")
+    def make_label_smooth(self):
+        # TODO(minqiyang): support gpu ut
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            label = self._get_data(name="label", shape=[1], dtype="int32")
             one_hot_label = layers.one_hot(input=label, depth=10)
-            smooth_label = layers.label_smooth(
-                label=one_hot_label, epsilon=0.1, dtype="float32")
-            self.assertIsNotNone(smooth_label)
-        print(str(program))
-
-    def test_topk(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name="label", shape=[200], dtype="float32")
-            values, indices = layers.topk(data, k=5)
-            self.assertIsNotNone(values)
-            self.assertIsNotNone(indices)
-        print(str(program))
-
-    def test_roi_pool(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
-            rois = layers.data(
-                name="rois", shape=[4], dtype="float32", lod_level=1)
-            output = layers.roi_pool(x, rois, 7, 7, 0.6)
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_psroi_pool(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
-            rois = layers.data(
-                name="rois", shape=[4], dtype="float32", lod_level=1)
-            output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7)
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_roi_align(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
-            rois = layers.data(
-                name="rois", shape=[4], dtype="float32", lod_level=1)
-            output = layers.roi_align(x, rois, 14, 14, 0.5, 2)
-            self.assertIsNotNone(output)
-        print(str(program))
+            smooth_label = layers.label_smooth(
+                label=one_hot_label, epsilon=0.1, dtype="int32")
+            return (smooth_label)
+
+    def make_topk(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name="label", shape=[200], dtype="float32")
+            values, indices = layers.topk(data, k=5)
+            return (values)
+            return (indices)

-    def test_resize_bilinear(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 9, 6], dtype="float32")
+    def make_resize_bilinear(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
             output = layers.resize_bilinear(x, out_shape=[12, 12])
-            self.assertIsNotNone(output)
+            return (output)
             output = layers.resize_bilinear(x, scale=3)
-            self.assertIsNotNone(output)
-        print(str(program))
+            return (output)

-    def test_resize_nearest(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 9, 6], dtype="float32")
+    def make_resize_nearest(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32")
             output = layers.resize_nearest(x, out_shape=[12, 12])
-            self.assertIsNotNone(output)
+            return (output)
             output = layers.resize_nearest(x, scale=3)
-            self.assertIsNotNone(output)
-        print(str(program))
+            return (output)

-    def test_polygon_box_transform(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8, 4, 4], dtype="float32")
+    def make_polygon_box_transform(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[8, 4, 4], dtype="float32")
             output = layers.polygon_box_transform(input=x)
-            self.assertIsNotNone(output)
-        print(str(program))
+            return (output)

-    def test_l2_normalize(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[8, 7, 10], dtype="float32")
+    def make_l2_normalize(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32")
             output = layers.l2_normalize(x, axis=1)
+            return output

-    def test_maxout(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='x', shape=[8, 6, 6], dtype="float32")
+    def make_maxout(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='x', shape=[8, 6, 6], dtype="float32")
             output = layers.maxout(x=data, groups=2)
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_crop(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[3, 5], dtype="float32")
-            y = layers.data(name='y', shape=[2, 3], dtype="float32")
+            return (output)
+
+    def make_crop(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            x = self._get_data(name='x', shape=[3, 5], dtype="float32")
+            y = self._get_data(name='y', shape=[2, 3], dtype="float32")
             output = layers.crop(x, shape=y)
-            self.assertIsNotNone(output)
-        print(str(program))
-
-    def test_mean_iou(self):
-        program = Program()
-        with program_guard(program):
-            x = layers.data(name='x', shape=[16], dtype='float32')
-            y = layers.data(name='label', shape=[1], dtype='int64')
+            return (output)
+
+    def make_mean_iou(self):
+        # TODO(minqiyang): support gpu ut
+        self._force_to_use_cpu = True
+        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
+            x = self._get_data(name='x', shape=[16], dtype='int32')
+            y = self._get_data(name='label', shape=[1], dtype='int32')
             iou = layers.mean_iou(x, y, 2)
-            self.assertIsNotNone(iou)
-        print(str(program))
+            return (iou)

-    def test_argsort(self):
-        program = Program()
-        with program_guard(program):
-            data = layers.data(name='x', shape=[2, 3, 3], dtype="float32")
+    def make_argsort(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32")
             out, ids = layers.argsort(input=data, axis=1)
-            self.assertIsNotNone(out)
-            self.assertIsNotNone(ids)
-        print(str(program))
-
-    def test_rank_loss(self):
-        program = Program()
-        with program_guard(program):
-            label = layers.data(
+            return (out)
+            return (ids)
+
+    def make_rank_loss(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            label = self._get_data(
                 name='label',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
-            left = layers.data(
+            left = self._get_data(
                 name='left',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
-            right = layers.data(
+            right = self._get_data(
                 name='right',
                 append_batch_size=False,
                 shape=[16, 1],
                 dtype="float32")
             out = layers.rank_loss(label, left, right, name="rank_loss")
-            self.assertIsNotNone(out)
-        print(str(program))
+            return (out)

     def test_flatten(self):
         program = Program()
         with program_guard(program):
             x = layers.data(
                 name='x',
                 append_batch_size=False,
                 shape=[4, 4, 3],
                 dtype="float32")
             out = layers.flatten(x, axis=1, name="flatten")
             self.assertIsNotNone(out)

-    def test_shape(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_shape(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[3, 100, 100], dtype="float32")
             out = layers.shape(input)
-            self.assertIsNotNone(out)
-        print(str(program))
+            return (out)

-    def test_pad2d(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_pad2d(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[3, 100, 100], dtype="float32")
             paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
             out = layers.pad2d(
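Cases whose ops lack GPU kernels (make_label_smooth and make_mean_iou above, make_hsigmoid and make_bpr_loss elsewhere in this file) pin themselves to CPU by setting self._force_to_use_cpu and entering fluid.framework._dygraph_place_guard. A minimal sketch of that pattern with a hypothetical builder (softsign stands in for any CPU-only case):

    def make_cpu_only_case(self):
        # Hypothetical builder following the CPU-pinning pattern: the flag
        # makes the static pass run on CPUPlace too, keeping both sides of
        # the static/dygraph comparison on the same device.
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name='x', shape=[4], dtype='float32')
            return (layers.softsign(x))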
@@ -1275,14 +1109,13 @@ class TestBook(unittest.TestCase):
                 mode='reflect',
                 data_format='NCHW',
                 name="shape")
-            self.assertIsNotNone(out)
-            self.assertIsNotNone(out_1)
-        print(str(program))
+            return (out)
+            return (out_1)

-    def test_prelu(self):
-        program = Program()
-        with program_guard(program):
-            input = layers.data(
+    def make_prelu(self):
+        with program_guard(fluid.default_main_program(),
+                           fluid.default_startup_program()):
+            input = self._get_data(
                 name="input", shape=[5, 200, 100, 100], dtype="float32")
             mode = 'channel'
             out = layers.prelu(
@@ -1290,291 +1123,365 @@ class TestBook(unittest.TestCase):
mode
,
param_attr
=
ParamAttr
(
initializer
=
Constant
(
1.0
)),
name
=
'prelu'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_brelu
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_brelu
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
brelu
(
input
,
t_min
=
1.0
,
t_max
=
20.0
,
name
=
'brelu'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_leaky_relu
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_leaky_relu
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
leaky_relu
(
input
,
alpha
=
0.1
,
name
=
'leaky_relu'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_soft_relu
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_soft_relu
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
soft_relu
(
input
,
threshold
=
30.0
,
name
=
'soft_relu'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_sigmoid
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_sigmoid
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
sigmoid
(
input
,
name
=
'sigmoid'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_logsigmoid
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_logsigmoid
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
logsigmoid
(
input
,
name
=
'logsigmoid'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_exp
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_exp
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
exp
(
input
,
name
=
'exp'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_tanh
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_tanh
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
tanh
(
input
,
name
=
'tanh'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_tanh_shrink
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_tanh_shrink
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
tanh_shrink
(
input
,
name
=
'tanh_shrink'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_sqrt
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_sqrt
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
sqrt
(
input
,
name
=
'sqrt'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_abs
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_abs
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
abs
(
input
,
name
=
'abs'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_ceil
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_ceil
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
ceil
(
input
,
name
=
'ceil'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_floor
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_floor
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
floor
(
input
,
name
=
'floor'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_cos
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_cos
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
cos
(
input
,
name
=
'cos'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_sin
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_sin
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
sin
(
input
,
name
=
'sin'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_round
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_round
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
round
(
input
,
name
=
'round'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_reciprocal
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_reciprocal
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
reciprocal
(
input
,
name
=
'reciprocal'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_square
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_square
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
square
(
input
,
name
=
'square'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
def
test
_softplus
(
self
):
program
=
Program
()
with
program_guard
(
program
):
input
=
layers
.
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
def
make
_softplus
(
self
):
with
program_guard
(
fluid
.
default_main_program
(),
fluid
.
default_startup_program
()
):
input
=
self
.
_get_
data
(
name
=
"input"
,
shape
=
[
16
],
dtype
=
"float32"
)
out
=
layers
.
softplus
(
input
,
name
=
'softplus'
)
self
.
assertIsNotNone
(
out
)
print
(
str
(
program
))
return
(
out
)
    def make_softsign(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softsign(input, name='softsign')
            self.assertIsNotNone(out)
    def test_roi_perspective_transform(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(name="rois", shape=[8], dtype="float32", lod_level=1)
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
            self.assertIsNotNone(output)
            print(str(program))
            return (output)
    def test_sequence_enumerate(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
            print(str(program))
    def make_cross_entropy(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            mode = 'channel'
            out = layers.cross_entropy(x, label, False, 4)
            self.assertIsNotNone(out)
            return (out)
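
    # Why the label dtype changed to int64: cross_entropy uses `label` as class
    # indices to pick one probability per row. A hedged numpy rendering of that
    # lookup (illustration only, not the operator's kernel):
    def cross_entropy_numpy_reference(self):
        import numpy as np
        probs = np.random.rand(30, 10).astype('float32')
        probs /= probs.sum(axis=1, keepdims=True)            # rows sum to 1
        label = np.random.randint(0, 10, (30, 1)).astype('int64')
        return -np.log(probs[np.arange(30), label.ravel()])  # one loss per row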
    def make_bpr_loss(self):
        self._force_to_use_cpu = True
        with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()):
            x = self._get_data(name="x", shape=[30, 10], dtype="float32")
            label = self._get_data(name="label", shape=[30, 1], dtype="int64")
            out = layers.bpr_loss(x, label)
            self.assertIsNotNone(out)
            return (out)
    def make_expand(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="input", shape=[10], dtype='int32')
            out = layers.expand(x, [1, 2])
            return out
    def make_uniform_random_batch_size_like(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[13, 11], dtype='float32')
            out = layers.uniform_random_batch_size_like(input, [-1, 11])
            self.assertIsNotNone(out)
            return (out)
    def make_gaussian_random(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            out = layers.gaussian_random(shape=[20, 30])
            self.assertIsNotNone(out)
            return (out)
    def make_sampling_id(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(
                name="X",
                shape=[13, 11],
                dtype='float32',
                append_batch_size=False)
            out = layers.sampling_id(x)
            self.assertIsNotNone(out)
            return (out)
    def make_gaussian_random_batch_size_like(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[13, 11], dtype='float32')
            out = layers.gaussian_random_batch_size_like(
                input, shape=[-1, 11], mean=1.0, std=2.0)
            self.assertIsNotNone(out)
            return (out)
    def make_sum(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[13, 11], dtype='float32')
            out = layers.sum(input)
            self.assertIsNotNone(out)
            return (out)
    def make_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[3, 4, 5, 6], dtype='float32')
            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
            return out
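    # The call above corresponds to input[1:3, 0:3, 2:4, :] on the
    # [3, 4, 5, 6] tensor: each (axis, start, end) triple is a numpy-style
    # half-open range, and untouched axes are kept whole.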
    def make_softshrink(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            input = self._get_data(name="input", shape=[16], dtype="float32")
            out = layers.softshrink(input, name='softshrink')
            self.assertIsNotNone(out)
            return (out)
    def iou_similarity(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="x", shape=[16], dtype="float32")
            y = self._get_data(name="y", shape=[16], dtype="float32")
            out = layers.iou_similarity(x, y, name='iou_similarity')
            self.assertIsNotNone(out)
    def make_grid_sampler(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32')
            grid = self._get_data(name='grid', shape=[5, 7, 2], dtype='float32')
            out = layers.grid_sampler(x, grid)
            self.assertIsNotNone(out)
            return (out)
    def make_bilinear_tensor_product_layer(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[4], dtype="float32")
            theta = self._get_data(name="theta", shape=[5], dtype="float32")
            out = layers.bilinear_tensor_product(data, theta, 6)
            return (out)
    def make_batch_norm(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            data = self._get_data(name='data', shape=[32, 128, 128], dtype="float32")
            out = layers.batch_norm(data)
            return (out)
    def make_range(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            layers.range(0, 10, 2, 'int32')
            y = layers.range(0.1, 10.0, 0.2, 'float32')
            return y
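    # arange-style semantics (an assumption consistent with the calls above):
    # layers.range(0, 10, 2, 'int32') yields [0, 2, 4, 6, 8], and the float
    # variant counts 0.1, 0.3, ... up to but excluding 10.0.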
    def make_spectral_norm(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            weight = self._get_data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False)
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            return (out)
    def make_kldiv_loss(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name='x', shape=[32, 128, 128], dtype="float32")
            target = self._get_data(name='target', shape=[32, 128, 128], dtype="float32")
            loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean')
            return (loss)
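
    # A hedged numpy sketch of what reduction='batchmean' computes, assuming x
    # holds log-probabilities as KL-divergence losses conventionally expect
    # (illustration only, not the kernel):
    def kldiv_batchmean_numpy_reference(self):
        import numpy as np
        x = np.log(np.full((4, 8), 1.0 / 8.0, dtype='float32'))  # log-probs
        target = np.full((4, 8), 1.0 / 8.0, dtype='float32')
        pointwise = target * (np.log(target) - x)    # elementwise KL term
        return pointwise.sum() / x.shape[0]          # sum, then / batch size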
    def make_temporal_shift(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.temporal_shift(x, seg_num=2, shift_ratio=0.2)
            return (out)
    def make_shuffle_channel(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.shuffle_channel(x, group=4)
            return (out)
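
    # shuffle_channel interleaves the channel groups; the classic ShuffleNet
    # reshape-transpose-reshape rendering in numpy (a sketch under that assumed
    # semantics, not the operator's implementation):
    def shuffle_channel_numpy_reference(self):
        import numpy as np
        n, c, h, w, group = 1, 16, 4, 4, 4
        x = np.arange(n * c * h * w, dtype='float32').reshape(n, c, h, w)
        return (x.reshape(n, group, c // group, h, w)
                .transpose(0, 2, 1, 3, 4)
                .reshape(n, c, h, w))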
    def make_fsp(self):
        with program_guard(fluid.default_main_program(),
                           fluid.default_startup_program()):
            x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32")
            y = self._get_data(name="Y", shape=[8, 4, 4], dtype="float32")
            out = layers.fsp_matrix(x, y)
            return (out)
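
    # fsp_matrix pairs two feature maps that share spatial size into a
    # (C_x, C_y) Gram-style matrix averaged over positions; a hedged numpy
    # sketch of that definition (illustration, not the kernel):
    def fsp_numpy_reference(self):
        import numpy as np
        x = np.random.rand(1, 16, 4, 4).astype('float32')   # (N, C_x, H, W)
        y = np.random.rand(1, 8, 4, 4).astype('float32')    # (N, C_y, H, W)
        xf = x.reshape(1, 16, -1)                            # (N, C_x, H*W)
        yf = y.reshape(1, 8, -1)                             # (N, C_y, H*W)
        return np.matmul(xf, yf.transpose(0, 2, 1)) / xf.shape[2]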
    def test_dynamic_lstmp(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            hidden_dim, proj_dim = 16, 8
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
            fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
            self.assertIsNotNone(
                layers.dynamic_lstmp(
                    input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
    def test_linear_chain_crf(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            label_dict_len = 10
            images = layers.data(name='pixel', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            hidden = layers.fc(input=images, size=2)
            crf = layers.linear_chain_crf(
                input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
            crf_decode = layers.crf_decoding(
                input=hidden, param_attr=ParamAttr(name="crfw"))
            self.assertFalse(crf is None)
            self.assertFalse(crf_decode is None)
            return layers.chunk_eval(
                input=crf_decode,
                label=label,
                chunk_scheme="IOB",
                num_chunk_types=(label_dict_len - 1) // 2)
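    # With chunk_scheme="IOB", each chunk type contributes a B- and an I- tag
    # and one extra label is the outside tag, so num_chunk_types =
    # (label_dict_len - 1) // 2 = (10 - 1) // 2 = 4 here.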
    def test_im2sequence(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
            y = layers.data(name='y', shape=[], dtype='float32')
            output = layers.im2sequence(
                input=x,
                input_image_size=y,
                stride=[1, 1],
                filter_size=[2, 2],
                out_stride=[1, 1])
            return (output)
    def test_lod_reset(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(name='y', shape=[10, 20], dtype='float32', lod_level=2)
            return (layers.lod_reset(x=x, y=y))
    def test_affine_grid(self):
        with self.static_graph():
            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
            out, ids = layers.argsort(input=data, axis=1)
...
...
@@ -1586,81 +1493,153 @@ class TestBook(unittest.TestCase):
            self.assertIsNotNone(data_0)
            self.assertIsNotNone(data_1)
            print(str(program))
    def test_bilinear_tensor_product_layer(self):
        program = Program()
        with program_guard(program):
            data = layers.data(name='data', shape=[4], dtype="float32")
            theta = layers.data(name="theta", shape=[5], dtype="float32")
            out = layers.bilinear_tensor_product(data, theta, 6)

    def test_psroi_pool(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32", lod_level=1)
            output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7)
            return (output)
    def test_sequence_expand(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10], dtype='float32')
            y = layers.data(name='y', shape=[10, 20], dtype='float32', lod_level=2)
            return (layers.sequence_expand(x=x, y=y, ref_level=1))
    def test_sequence_reshape(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
            out = layers.sequence_reshape(input=x, new_dim=16)
            return (out)
    def test_batch_norm(self):
        program = Program()
        with program_guard(program):
            data = layers.data(name='data', shape=[32, 128, 128], dtype="float32")
            out = layers.batch_norm(data)
    def test_sequence_unpad(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
            length = layers.data(name='length', shape=[1], dtype='int64')
            return (layers.sequence_unpad(x=x, length=length))
    def test_sequence_softmax(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            seq_data = layers.data(
                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
            seq = layers.fc(input=seq_data, size=20)
            return (layers.sequence_softmax(seq))
    def test_range(self):
        program = Program()
        with program_guard(program):
            layers.range(0, 10, 2, 'int32')
            layers.range(0.1, 10.0, 0.2, 'float32')
    def test_sequence_unsqueeze(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[8, 2], dtype='float32')
            out = layers.unsqueeze(input=x, axes=[1])
            return (out)
    def test_sequence_scatter(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(
                name='x', shape=[3, 6], append_batch_size=False, dtype='float32')
            idx = layers.data(
                name='idx',
                shape=[12, 1],
                append_batch_size=False,
                dtype='int32',
                lod_level=1)
            updates = layers.data(
                name='updates',
                shape=[12, 1],
                append_batch_size=False,
                dtype='float32',
                lod_level=1)
            out = layers.sequence_scatter(input=x, index=idx, updates=updates)
            return (out)
    def test_spectral_norm(self):
        program = Program()
        with program_guard(program):
            weight = layers.data(
                name='weight',
                shape=[2, 3, 32, 32],
                dtype="float32",
                append_batch_size=False)
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            self.assertIsNotNone(out)
    def test_sequence_slice(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            import numpy as np
            seqs = layers.data(
                name='x', shape=[10, 5], dtype='float32', lod_level=1)
            offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
            length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
            out = layers.sequence_slice(
                input=seqs, offset=offset, length=length)
            return (out)
    def test_kldiv_loss(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[32, 128, 128], dtype="float32")
            target = layers.data(name='target', shape=[32, 128, 128], dtype="float32")
            loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean')
            self.assertIsNotNone(loss)
            print(str(program))
    def test_temporal_shift(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.temporal_shift(x, seg_num=4, shift_ratio=0.2)
            self.assertIsNotNone(out)
            print(str(program))
    def test_shuffle_channel(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
            out = layers.shuffle_channel(x, group=4)
            self.assertIsNotNone(out)
            print(str(program))
    def test_fsp(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="X", shape=[16, 4, 4], dtype="float32")
            y = layers.data(name="Y", shape=[8, 4, 4], dtype="float32")
            out = layers.fsp_matrix(x, y)
            self.assertIsNotNone(out)
            print(str(program))
    def test_roi_pool(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32", lod_level=1)
            output = layers.roi_pool(x, rois, 7, 7, 0.6)
            return (output)
    def test_sequence_enumerate(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
            out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
    def test_roi_align(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(name="rois", shape=[4], dtype="float32", lod_level=1)
            output = layers.roi_align(x, rois, 14, 14, 0.5, 2)
            return (output)
    def test_roi_perspective_transform(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
            rois = layers.data(name="rois", shape=[8], dtype="float32", lod_level=1)
            output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
            return (output)
    def test_row_conv(self):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
            out = layers.row_conv(input=x, future_context_size=2)
            return (out)
    def test_simple_conv2d(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            images = layers.data(name='pixel', shape=[3, 48, 48], dtype='float32')
            return layers.conv2d(
                input=images, num_filters=3, filter_size=[4, 4])
    def test_squeeze(self):
        # TODO(minqiyang): dygraph do not support layers with param now
        with self.static_graph():
            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
            out = layers.squeeze(input=x, axes=[2])
            return (out)
    def test_flatten(self):
        # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
            x = layers.data(
                name='x',
                append_batch_size=False,
                shape=[4, 4, 3],
                dtype="float32")
            out = layers.flatten(x, axis=1, name="flatten")
            return (out)
if __name__ == '__main__':
    ...