PaddlePaddle/Paddle, commit e6ca78c2 (signature unverified)
Authored by wanghuancoder on Mar 13, 2023; committed by GitHub on Mar 13, 2023
Parent commit: 48090c72

Del old dygraph optest2 (#51458)

* delete old dygraph op test

Showing 18 changed files with 1,017 additions and 1,048 deletions.
Changed files (additions / deletions):

  python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py   +1    -0
  python/paddle/fluid/tests/unittests/test_activation_op.py                 +917  -938
  python/paddle/fluid/tests/unittests/test_atan2_op.py                      +3    -4
  python/paddle/fluid/tests/unittests/test_attention_lstm_op.py             +1    -1
  python/paddle/fluid/tests/unittests/test_auc_op.py                        +3    -3
  python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py            +3    -3
  python/paddle/fluid/tests/unittests/test_batch_fc_op.py                   +1    -1
  python/paddle/fluid/tests/unittests/test_batch_norm_op.py                 +1    -1
  python/paddle/fluid/tests/unittests/test_bernoulli_op.py                  +1    -1
  python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py             +3    -9
  python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py          +3    -9
  python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py            +22   -21
  python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py         +6    -8
  python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py    +26   -25
  python/paddle/fluid/tests/unittests/test_bincount_op.py                   +2    -2
  python/paddle/fluid/tests/unittests/test_bipartite_match_op.py            +1    -1
  python/paddle/fluid/tests/unittests/test_bmm_op.py                        +22   -20
  python/paddle/fluid/tests/unittests/test_box_clip_op.py                   +1    -1
python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py

@@ -437,6 +437,7 @@ class TestMKLDNNExpOp(TestActivation):
 # Check if primitives already exist in backward
 class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
     def setUp(self):
+        paddle.enable_static()
         super().setUp()
         np.random.seed(123)
python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -17,7 +17,7 @@ import unittest
 import warnings
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
 from scipy.special import erf, expit
 import paddle

@@ -28,25 +28,24 @@ import paddle.static as static
 from paddle.fluid import Program, program_guard
 from paddle.fluid.layer_helper import LayerHelper

-paddle.enable_static()
-

 class TestSqrtOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-            # The input type of sqrt op must be Variable or numpy.ndarray.
-            in1 = 1
-            self.assertRaises(TypeError, paddle.sqrt, in1)
-            # The input dtype of sqrt op must be float16, float32, float64.
-            in2 = paddle.static.data(name='input2', shape=[-1, 12, 10], dtype="int32")
-            self.assertRaises(TypeError, paddle.sqrt, in2)
-
-            in3 = paddle.static.data(name='input3', shape=[-1, 12, 10], dtype="float16")
-            paddle.sqrt(x=in3)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                # The input type of sqrt op must be Variable or numpy.ndarray.
+                in1 = 1
+                self.assertRaises(TypeError, paddle.sqrt, in1)
+                # The input dtype of sqrt op must be float16, float32, float64.
+                in2 = paddle.static.data(name='input2', shape=[-1, 12, 10], dtype="int32")
+                self.assertRaises(TypeError, paddle.sqrt, in2)
+
+                in3 = paddle.static.data(name='input3', shape=[-1, 12, 10], dtype="float16")
+                paddle.sqrt(x=in3)


 class TestActivation(OpTest):
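The import swap above is the crux of the patch: the shared OpTest base now comes from eager_op_test, and every remaining static-graph snippet is scoped by paddle_static_guard instead of a module-level paddle.enable_static(). That keeps dygraph (eager) mode as the process-wide default and confines static mode to the blocks that need it. The guard's body is not shown in this diff; a minimal sketch of such a guard, assuming it simply toggles static mode and restores eager mode afterwards:

from contextlib import contextmanager

import paddle


@contextmanager
def static_guard():
    """Hypothetical stand-in for eager_op_test.paddle_static_guard:
    enter static-graph mode, restore dygraph mode on exit."""
    paddle.enable_static()
    try:
        yield
    finally:
        paddle.disable_static()


# Usage mirrors the pattern introduced throughout this patch:
# with static_guard():
#     with paddle.static.program_guard(paddle.static.Program()):
#         ...build and run a static program...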
@@ -56,7 +55,6 @@ class TestActivation(OpTest):
         self.init_dtype()
         self.init_shape()
         self.init_kernel_type()
-        self.check_eager = True
         self.python_api = paddle.exp

         np.random.seed(2049)

@@ -67,18 +65,15 @@ class TestActivation(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        check_eager = False
-        if hasattr(self, 'check_eager'):
-            check_eager = self.check_eager
-        self.check_output(check_eager=check_eager)
+        self.check_output()

     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        check_eager = False
-        if hasattr(self, 'check_eager'):
-            check_eager = self.check_eager
-        self.check_grad(['X'], 'Out', check_eager=check_eager)
+        self.check_grad(
+            ['X'],
+            'Out',
+        )

     def init_dtype(self):
         self.dtype = np.float64

@@ -155,10 +150,10 @@ class TestExpm1(TestActivation):
         self.outputs = {'Out': out}

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestExpm1_ZeroDim(TestExpm1):
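With the eager_op_test harness, check_output() and check_grad() exercise the dygraph path by default, so the per-class check_eager plumbing disappears. A hedged sketch of what a migrated operator test reduces to (the class name and values here are illustrative, not taken from the patch):

import numpy as np
import paddle
from eager_op_test import OpTest  # the harness this patch switches to


class TestExpm1Sketch(OpTest):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1  # dygraph API the harness calls
        x = np.random.uniform(0.1, 1.0, [11, 17]).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.expm1(x)}

    def test_check_output(self):
        self.check_output()  # eager checking is implicit now

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')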
@@ -181,14 +176,13 @@ class TestExpm1API(unittest.TestCase):
             self.place.append(paddle.CUDAPlace(0))

     def test_static_api(self):
-        paddle.enable_static()
-
         def run(place):
-            with paddle.static.program_guard(paddle.static.Program()):
-                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
-                out = paddle.expm1(X)
-                exe = paddle.static.Executor(place)
-                res = exe.run(feed={'X': self.x})
+            with paddle_static_guard():
+                with paddle.static.program_guard(paddle.static.Program()):
+                    X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
+                    out = paddle.expm1(X)
+                    exe = paddle.static.Executor(place)
+                    res = exe.run(feed={'X': self.x})
             for r in res:
                 np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

@@ -197,36 +191,35 @@ class TestExpm1API(unittest.TestCase):
     def test_dygraph_api(self):
         def run(place):
             paddle.disable_static(place)
             X = paddle.to_tensor(self.x)
             out = paddle.expm1(X)
             np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
-            paddle.enable_static()

         for place in self.place:
             run(place)

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            X = paddle.fluid.data('X', self.shape, dtype='int32')
-            self.assertRaises(TypeError, paddle.expm1, X)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                X = paddle.fluid.data('X', self.shape, dtype='int32')
+                self.assertRaises(TypeError, paddle.expm1, X)


 # The input dtype must be float16, float32, float64.
 class TestParameter:
     def test_out_name(self):
-        with fluid.program_guard(fluid.Program()):
-            if paddle.fluid.framework.in_dygraph_mode():
-                paddle.enable_static()
-            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
-            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
-            out = eval("paddle.%s(data, name='Y')" % self.op_type)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
-            expected = eval("np.%s(np_x)" % self.op_type)
-            np.testing.assert_allclose(result, expected, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
+                data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
+                out = eval("paddle.%s(data, name='Y')" % self.op_type)
+                place = fluid.CPUPlace()
+                exe = fluid.Executor(place)
+                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
+                expected = eval("np.%s(np_x)" % self.op_type)
+                np.testing.assert_allclose(result, expected, rtol=1e-05)

     def test_dygraph(self):
         with fluid.dygraph.guard():
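A small aside on TestParameter.test_out_name: it dispatches on self.op_type via eval("paddle.%s(data, name='Y')" % self.op_type). An equivalent, eval-free formulation that some suites prefer (a suggestion, not part of this patch):

import numpy as np
import paddle

op_type = "tanh"  # illustrative; supplied by the concrete subclass
api = getattr(paddle, op_type)   # instead of eval("paddle.%s(...)")
ref = getattr(np, op_type)       # instead of eval("np.%s(np_x)")
x = np.array([0.1], dtype='float32')
np.testing.assert_allclose(api(paddle.to_tensor(x)).numpy(), ref(x), rtol=1e-05)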
@@ -357,20 +350,19 @@ class TestSiluAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [11, 17])
-            out1 = F.silu(x)
-            m = paddle.nn.Silu()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = self.x_np / (1 + np.exp(-self.x_np))
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [11, 17])
+                out1 = F.silu(x)
+                m = paddle.nn.Silu()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = self.x_np / (1 + np.exp(-self.x_np))
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.silu(x)
         m = paddle.nn.Silu()

@@ -378,27 +370,28 @@ class TestSiluAPI(unittest.TestCase):
         out_ref = self.x_np / (1 + np.exp(-self.x_np))
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.silu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
-            self.assertRaises(TypeError, F.silu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
-            F.silu(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.silu, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
+                self.assertRaises(TypeError, F.silu, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
+                F.silu(x_fp16)


 class TestLogSigmoid(TestActivation):
     def setUp(self):
         self.op_type = "logsigmoid"
+        self.python_api = paddle.nn.functional.log_sigmoid
         self.init_dtype()
         self.init_shape()
@@ -432,20 +425,19 @@ class TestLogSigmoidAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [11, 17])
-            out1 = F.log_sigmoid(x)
-            m = paddle.nn.LogSigmoid()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [11, 17])
+                out1 = F.log_sigmoid(x)
+                m = paddle.nn.LogSigmoid()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.log_sigmoid(x)
         m = paddle.nn.LogSigmoid()

@@ -453,28 +445,28 @@ class TestLogSigmoidAPI(unittest.TestCase):
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.log_sigmoid, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
-            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
-            F.log_sigmoid(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.log_sigmoid, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
+                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
+                F.log_sigmoid(x_fp16)


 class TestTanh(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "tanh"
+        self.python_api = paddle.tanh
         self.init_dtype()
         self.init_shape()
@@ -519,20 +511,19 @@ class TestTanhAPI(unittest.TestCase):
             self.tanh = F.tanh

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12], self.dtype)
-            out1 = self.tanh(x)
-            th = paddle.nn.Tanh()
-            out2 = th(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = np.tanh(self.x_np)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12], self.dtype)
+                out1 = self.tanh(x)
+                th = paddle.nn.Tanh()
+                out2 = th(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = np.tanh(self.x_np)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.tanh(x)
         out2 = paddle.tanh(x)

@@ -541,23 +532,22 @@ class TestTanhAPI(unittest.TestCase):
         out_ref = np.tanh(self.x_np)
         for r in [out1, out2, out3]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, self.tanh, 1)
-            # The input dtype must be float16, float32.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, self.tanh, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            self.tanh(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, self.tanh, 1)
+                # The input dtype must be float16, float32.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, self.tanh, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                self.tanh(x_fp16)


 class TestTanhInplaceAPI(TestTanhAPI):
@@ -569,6 +559,7 @@ class TestTanhInplaceAPI(TestTanhAPI):
 class TestAtan(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "atan"
+        self.python_api = paddle.atan
         self.init_dtype()
         self.init_shape()

@@ -585,15 +576,18 @@ class TestAtan(TestActivation, TestParameter):
         self.check_grad(['X'], 'Out')

     def test_out_name(self):
-        with fluid.program_guard(fluid.Program()):
-            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
-            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
-            out = paddle.atan(data, name='Y')
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
-            expected = np.arctan(np_x)
-            self.assertEqual(result, expected)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
+                data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
+                out = paddle.atan(data, name='Y')
+                place = fluid.CPUPlace()
+                exe = fluid.Executor(place)
+                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
+                expected = np.arctan(np_x)
+                self.assertEqual(result, expected)

     def test_dygraph(self):
         with fluid.dygraph.guard():
@@ -612,6 +606,7 @@ class TestAtan_ZeroDim(TestTanh):
 class TestSinh(TestActivation):
     def setUp(self):
         self.op_type = "sinh"
+        self.python_api = paddle.sinh
         self.init_dtype()
         self.init_shape()

@@ -643,28 +638,29 @@ class TestSinhAPI(unittest.TestCase):
             np.testing.assert_allclose(z, z_expected, rtol=1e-05)

     def test_api(self):
-        test_data_shape = [11, 17]
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = np.random.uniform(0.1, 1, test_data_shape).astype("float32")
-            data_x = paddle.static.data(
-                name="data_x",
-                shape=test_data_shape,
-                dtype="float32",
-            )
+        with paddle_static_guard():
+            test_data_shape = [11, 17]
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input_x = np.random.uniform(0.1, 1, test_data_shape).astype("float32")
+                data_x = paddle.static.data(
+                    name="data_x",
+                    shape=test_data_shape,
+                    dtype="float32",
+                )

-            pd_sinh_out = paddle.sinh(data_x)
-            exe = fluid.Executor(place=fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-            (np_sinh_res,) = exe.run(
-                fluid.default_main_program(),
-                feed={"data_x": input_x},
-                fetch_list=[pd_sinh_out],
-            )
+                pd_sinh_out = paddle.sinh(data_x)
+                exe = fluid.Executor(place=fluid.CPUPlace())
+                exe.run(fluid.default_startup_program())
+                (np_sinh_res,) = exe.run(
+                    fluid.default_main_program(),
+                    feed={"data_x": input_x},
+                    fetch_list=[pd_sinh_out],
+                )

-        expected_res = np.sinh(input_x)
-        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
+            expected_res = np.sinh(input_x)
+            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

     def test_backward(self):
         test_data_shape = [11, 17]
@@ -682,20 +678,26 @@ class TestSinhAPI(unittest.TestCase):
 class TestSinhOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, paddle.sinh, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, paddle.sinh, x_int32)
-            # support the input dtype is float16
-            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            paddle.sinh(x_fp16)
+        with paddle_static_guard():
+            with program_guard(Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, paddle.sinh, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, paddle.sinh, x_int32)
+                # support the input dtype is float16
+                x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                paddle.sinh(x_fp16)


 class TestCosh(TestActivation):
     def setUp(self):
         self.op_type = "cosh"
+        self.python_api = paddle.cosh
         self.init_dtype()
         self.init_shape()

@@ -727,28 +729,29 @@ class TestCoshAPI(unittest.TestCase):
             np.testing.assert_allclose(z, z_expected, rtol=1e-05)

     def test_api(self):
-        test_data_shape = [11, 17]
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = np.random.uniform(0.1, 1, test_data_shape).astype("float32")
-            data_x = paddle.static.data(
-                name="data_x",
-                shape=test_data_shape,
-                dtype="float32",
-            )
+        with paddle_static_guard():
+            test_data_shape = [11, 17]
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input_x = np.random.uniform(0.1, 1, test_data_shape).astype("float32")
+                data_x = paddle.static.data(
+                    name="data_x",
+                    shape=test_data_shape,
+                    dtype="float32",
+                )

-            pd_cosh_out = paddle.cosh(data_x)
-            exe = fluid.Executor(place=fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-            (np_cosh_res,) = exe.run(
-                fluid.default_main_program(),
-                feed={"data_x": input_x},
-                fetch_list=[pd_cosh_out],
-            )
+                pd_cosh_out = paddle.cosh(data_x)
+                exe = fluid.Executor(place=fluid.CPUPlace())
+                exe.run(fluid.default_startup_program())
+                (np_cosh_res,) = exe.run(
+                    fluid.default_main_program(),
+                    feed={"data_x": input_x},
+                    fetch_list=[pd_cosh_out],
+                )

-        expected_res = np.cosh(input_x)
-        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
+            expected_res = np.cosh(input_x)
+            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

     def test_backward(self):
         test_data_shape = [11, 17]
@@ -766,15 +769,20 @@ class TestCoshAPI(unittest.TestCase):
 class TestCoshOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, paddle.cosh, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, paddle.cosh, x_int32)
-            # support the input dtype is float16
-            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            paddle.cosh(x_fp16)
+        with paddle_static_guard():
+            with program_guard(Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, paddle.cosh, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, paddle.cosh, x_int32)
+                # support the input dtype is float16
+                x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                paddle.cosh(x_fp16)


 def ref_tanhshrink(x):
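The body of ref_tanhshrink is cut off in this view. Tanhshrink is conventionally defined as x - tanh(x), so the helper presumably reduces to something like this (an assumed reconstruction, not copied from the file):

import numpy as np

def ref_tanhshrink_sketch(x):  # hypothetical reconstruction of ref_tanhshrink
    return x - np.tanh(x)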
@@ -785,6 +793,7 @@ def ref_tanhshrink(x):
 class TestTanhshrink(TestActivation):
     def setUp(self):
         self.op_type = "tanh_shrink"
+        self.python_api = paddle.nn.functional.tanhshrink
         self.init_dtype()
         self.init_shape()

@@ -818,20 +827,19 @@ class TestTanhshrinkAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.tanhshrink(x)
-            tanhshrink = paddle.nn.Tanhshrink()
-            out2 = tanhshrink(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_tanhshrink(self.x_np)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.tanhshrink(x)
+                tanhshrink = paddle.nn.Tanhshrink()
+                out2 = tanhshrink(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_tanhshrink(self.x_np)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.tanhshrink(x)
         tanhshrink = paddle.nn.Tanhshrink()
@@ -839,23 +847,22 @@ class TestTanhshrinkAPI(unittest.TestCase):
         out_ref = ref_tanhshrink(self.x_np)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.tanhshrink, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, F.tanhshrink, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            F.tanhshrink(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.tanhshrink, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, F.tanhshrink, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                F.tanhshrink(x_fp16)


 def ref_hardshrink(x, threshold):
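ref_hardshrink's body is likewise elided. Hard shrink conventionally zeroes values inside [-threshold, threshold] and passes the rest through, so a plausible reconstruction (an assumption for illustration) is:

import numpy as np

def ref_hardshrink_sketch(x, threshold):  # hypothetical reconstruction
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out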
@@ -867,6 +874,7 @@ def ref_hardshrink(x, threshold):
 class TestHardShrink(TestActivation):
     def setUp(self):
         self.op_type = "hard_shrink"
+        self.python_api = paddle.nn.functional.hardshrink
         self.init_dtype()
         self.init_shape()

@@ -917,20 +925,19 @@ class TestHardShrinkAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out1 = F.hardshrink(x)
-            hd = paddle.nn.Hardshrink()
-            out2 = hd(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_hardshrink(self.x_np, 0.5)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out1 = F.hardshrink(x)
+                hd = paddle.nn.Hardshrink()
+                out2 = hd(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_hardshrink(self.x_np, 0.5)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.hardshrink(x)
         hd = paddle.nn.Hardshrink()
@@ -945,23 +952,22 @@ class TestHardShrinkAPI(unittest.TestCase):
         out_ref = ref_hardshrink(self.x_np, 0.6)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.hardshrink, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, F.hardshrink, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            F.hardshrink(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.hardshrink, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, F.hardshrink, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                F.hardshrink(x_fp16)


 def ref_hardtanh(x, min=-1.0, max=1.0):
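ref_hardtanh's body is cut off too; hardtanh is simply a clip to [min, max], so the reference is presumably equivalent to numpy's clip (again an assumed reconstruction):

import numpy as np

def ref_hardtanh_sketch(x, min=-1.0, max=1.0):  # hypothetical reconstruction
    return np.clip(x, min, max)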
@@ -984,20 +990,19 @@ class TestHardtanhAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out1 = F.hardtanh(x)
-            m = paddle.nn.Hardtanh()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_hardtanh(self.x_np)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out1 = F.hardtanh(x)
+                m = paddle.nn.Hardtanh()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_hardtanh(self.x_np)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.hardtanh(x)
         m = paddle.nn.Hardtanh()

@@ -1012,23 +1017,22 @@ class TestHardtanhAPI(unittest.TestCase):
         out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.hardtanh, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, F.hardtanh, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            F.hardtanh(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.hardtanh, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, F.hardtanh, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                F.hardtanh(x_fp16)


 def ref_softshrink(x, threshold=0.5):
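ref_softshrink's body is elided as well. Soft shrink conventionally shifts values toward zero by threshold and zeroes the band in between, so the reference is presumably (assumed reconstruction):

import numpy as np

def ref_softshrink_sketch(x, threshold=0.5):  # hypothetical reconstruction
    return np.where(x < -threshold, x + threshold,
                    np.where(x > threshold, x - threshold, 0.0))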
@@ -1042,7 +1046,6 @@ def ref_softshrink(x, threshold=0.5):
 class TestSoftshrink(TestActivation):
     def setUp(self):
         self.op_type = "softshrink"
-        self.check_eager = True
         self.python_api = paddle.nn.functional.softshrink
         self.init_dtype()
         self.init_shape()

@@ -1059,7 +1062,7 @@ class TestSoftshrink(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestSoftshrink_ZeroDim(TestSoftshrink):

@@ -1080,20 +1083,19 @@ class TestSoftshrinkAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.softshrink(x, self.threshold)
-            softshrink = paddle.nn.Softshrink(self.threshold)
-            out2 = softshrink(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_softshrink(self.x_np, self.threshold)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.softshrink(x, self.threshold)
+                softshrink = paddle.nn.Softshrink(self.threshold)
+                out2 = softshrink(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_softshrink(self.x_np, self.threshold)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.softshrink(x, self.threshold)
         softshrink = paddle.nn.Softshrink(self.threshold)
@@ -1101,28 +1103,27 @@ class TestSoftshrinkAPI(unittest.TestCase):
         out_ref = ref_softshrink(self.x_np, self.threshold)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.softshrink, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, F.softshrink, x_int32)
-            # The threshold must be no less than zero
-            x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
-            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            F.softshrink(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.softshrink, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, F.softshrink, x_int32)
+                # The threshold must be no less than zero
+                x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
+                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                F.softshrink(x_fp16)


 class TestSqrt(TestActivation, TestParameter):
@@ -1145,10 +1146,10 @@ class TestSqrt(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestSqrtPrimFp32(TestActivation):

@@ -1169,10 +1170,10 @@ class TestSqrtPrimFp32(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def init_dtype(self):
         self.dtype = np.float32
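Note that check_prim=True survives the cleanup while check_eager disappears: prim checking presumably validates the composite/primitive-operator decomposition and is orthogonal to the old/new dygraph distinction this patch removes. A hedged sketch of the resulting shape of a prim-enabled test (illustrative values, not from the patch):

import numpy as np
import paddle
from eager_op_test import OpTest


class TestSqrtPrimSketch(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"   # opt in to composite-op checking
        self.python_api = paddle.sqrt
        x = np.random.uniform(0.1, 1.0, [12, 10]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.sqrt(x)}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)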
@@ -1227,13 +1228,11 @@ class TestSqrtBF16(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=True)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=True, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestRsqrt(TestActivation):

@@ -1256,9 +1255,7 @@ class TestRsqrt(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
-        )
+        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


 '''
@@ -1296,7 +1293,7 @@ class TestAbs(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestAbs_ZeroDim(TestAbs):

@@ -1307,7 +1304,6 @@ class TestAbs_ZeroDim(TestAbs):
 class TestCeil(TestActivation):
     def setUp(self):
         self.op_type = "ceil"
-        self.check_eager = True
         self.python_api = paddle.ceil
         self.init_dtype()
         self.init_shape()

@@ -1336,7 +1332,6 @@ class TestFloor(TestActivation):
     def setUp(self):
         self.op_type = "floor"
         self.prim_op_type = "prim"
-        self.check_eager = True
         self.python_api = paddle.floor
         self.init_dtype()
         self.init_shape()

@@ -1367,7 +1362,6 @@ class TestFloor_Prim(TestActivation):
     def setUp(self):
         self.op_type = "floor"
         self.prim_op_type = "prim"
-        self.check_eager = True
         self.python_api = paddle.floor
         self.init_dtype()
         self.init_shape()

@@ -1407,6 +1401,7 @@ class TestFloorFp16_Prim(TestFloor_Prim):
 class TestCos(TestActivation):
     def setUp(self):
         self.op_type = "cos"
+        self.python_api = paddle.cos
         self.init_dtype()
         self.init_shape()

@@ -1435,6 +1430,7 @@ class TestTan(TestActivation):
     def setUp(self):
         np.random.seed(1024)
         self.op_type = "tan"
+        self.python_api = paddle.tan
         self.init_dtype()
         self.init_shape()
@@ -1477,22 +1473,20 @@ class TestTanAPI(unittest.TestCase):
         )

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out_test = paddle.tan(x)
         out_ref = np.tan(self.x_np)
         np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', [11, 17], self.dtype)
-            out = paddle.tan(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-        out_ref = np.tan(self.x_np)
-        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.static.data('X', [11, 17], self.dtype)
+                out = paddle.tan(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            out_ref = np.tan(self.x_np)
+            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

     def test_backward(self):
         test_data_shape = [11, 17]

@@ -1511,6 +1505,7 @@ class TestTanAPI(unittest.TestCase):
 class TestAcos(TestActivation):
     def setUp(self):
         self.op_type = "acos"
+        self.python_api = paddle.acos
         self.init_dtype()
         self.init_shape()

@@ -1538,6 +1533,7 @@ class TestAcos_ZeroDim(TestAcos):
 class TestSin(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "sin"
+        self.python_api = paddle.sin
         self.init_dtype()
         self.init_shape()
         # prim not support now

@@ -1567,6 +1563,7 @@ class TestSin_ZeroDim(TestSin):
 class TestAsin(TestActivation):
     def setUp(self):
         self.op_type = "asin"
+        self.python_api = paddle.asin
         self.init_dtype()
         self.init_shape()

@@ -1594,6 +1591,7 @@ class TestAsin_ZeroDim(TestAsin):
 class TestAcosh(TestActivation):
     def setUp(self):
         self.op_type = "acosh"
+        self.python_api = paddle.acosh
         self.init_dtype()
         self.init_shape()

@@ -1621,6 +1619,7 @@ class TestAcosh_ZeroDim(TestAcosh):
 class TestAsinh(TestActivation):
     def setUp(self):
         self.op_type = "asinh"
+        self.python_api = paddle.asinh
         self.init_dtype()
         self.init_shape()

@@ -1648,6 +1647,7 @@ class TestAsinh_ZeroDim(TestAsinh):
 class TestAtanh(TestActivation):
     def setUp(self):
         self.op_type = "atanh"
+        self.python_api = paddle.atanh
         self.init_dtype()
         self.init_shape()

@@ -1675,7 +1675,6 @@ class TestAtanh_ZeroDim(TestAtanh):
 class TestRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
-        self.check_eager = True
         self.python_api = paddle.round
         self.init_dtype()
         self.init_shape()
@@ -1760,20 +1759,19 @@ class TestReluAPI(unittest.TestCase):
             self.relu = F.relu

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out1 = self.relu(x)
-            m = paddle.nn.ReLU()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = np.maximum(self.x_np, 0)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out1 = self.relu(x)
+                m = paddle.nn.ReLU()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = np.maximum(self.x_np, 0)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         m = paddle.nn.ReLU()
         out1 = m(x)

@@ -1781,23 +1779,23 @@ class TestReluAPI(unittest.TestCase):
         out_ref = np.maximum(self.x_np, 0)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, self.relu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
-            self.assertRaises(TypeError, self.relu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
-            self.relu(x_fp16)
+        with paddle_static_guard():
+            with paddle_static_guard():
+                with paddle.static.program_guard(paddle.static.Program()):
+                    # The input type must be Variable.
+                    self.assertRaises(TypeError, self.relu, 1)
+                    # The input dtype must be float16, float32, float64.
+                    x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
+                    self.assertRaises(TypeError, self.relu, x_int32)
+                    # support the input dtype is float16
+                    x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
+                    self.relu(x_fp16)


 class TestReluInplaceAPI(TestReluAPI):
def
setUp
(
self
):
self
.
op_type
=
"leaky_relu"
self
.
python_api
=
paddle
.
nn
.
functional
.
leaky_relu
self
.
init_dtype
()
self
.
init_shape
()
alpha
=
self
.
get_alpha
()
...
...
@@ -1870,20 +1869,19 @@ class TestLeakyReluAPI(unittest.TestCase):
)
def
test_static_api
(
self
):
paddle
.
enable_static
()
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
x
=
paddle
.
fluid
.
data
(
'X'
,
[
10
,
12
])
out1
=
F
.
leaky_relu
(
x
)
m
=
paddle
.
nn
.
LeakyReLU
()
out2
=
m
(
x
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
res
=
exe
.
run
(
feed
=
{
'X'
:
self
.
x_np
},
fetch_list
=
[
out1
,
out2
])
out_ref
=
ref_leaky_relu
(
self
.
x_np
)
for
r
in
res
:
np
.
testing
.
assert_allclose
(
out_ref
,
r
,
rtol
=
1e-05
)
with
paddle_static_guard
():
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
x
=
paddle
.
fluid
.
data
(
'X'
,
[
10
,
12
])
out1
=
F
.
leaky_relu
(
x
)
m
=
paddle
.
nn
.
LeakyReLU
()
out2
=
m
(
x
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
res
=
exe
.
run
(
feed
=
{
'X'
:
self
.
x_np
},
fetch_list
=
[
out1
,
out2
])
out_ref
=
ref_leaky_relu
(
self
.
x_np
)
for
r
in
res
:
np
.
testing
.
assert_allclose
(
out_ref
,
r
,
rtol
=
1e-05
)
def
test_dygraph_api
(
self
):
paddle
.
disable_static
(
self
.
place
)
x
=
paddle
.
to_tensor
(
self
.
x_np
)
out1
=
F
.
leaky_relu
(
x
)
m
=
paddle
.
nn
.
LeakyReLU
()
...
...
@@ -1898,23 +1896,22 @@ class TestLeakyReluAPI(unittest.TestCase):
out_ref
=
ref_leaky_relu
(
self
.
x_np
,
0.6
)
for
r
in
[
out1
,
out2
]:
np
.
testing
.
assert_allclose
(
out_ref
,
r
.
numpy
(),
rtol
=
1e-05
)
paddle
.
enable_static
()
def
test_errors
(
self
):
paddle
.
enable_static
()
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
# The input type must be Variable.
self
.
assertRaises
(
TypeError
,
F
.
leaky_relu
,
1
)
# The input dtype must be float16, float32, float64.
x_int32
=
paddle
.
fluid
.
data
(
name
=
'x_int32'
,
shape
=
[
12
,
10
],
dtype
=
'int32'
)
self
.
assertRaises
(
TypeError
,
F
.
leaky_relu
,
x_int32
)
# support the input dtype is float16
x_fp16
=
paddle
.
fluid
.
data
(
name
=
'x_fp16'
,
shape
=
[
12
,
10
],
dtype
=
'float16'
)
F
.
leaky_relu
(
x_fp16
)
with
paddle_static_guard
():
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
# The input type must be Variable.
self
.
assertRaises
(
TypeError
,
F
.
leaky_relu
,
1
)
# The input dtype must be float16, float32, float64.
x_int32
=
paddle
.
fluid
.
data
(
name
=
'x_int32'
,
shape
=
[
12
,
10
],
dtype
=
'int32'
)
self
.
assertRaises
(
TypeError
,
F
.
leaky_relu
,
x_int32
)
# support the input dtype is float16
x_fp16
=
paddle
.
fluid
.
data
(
name
=
'x_fp16'
,
shape
=
[
12
,
10
],
dtype
=
'float16'
)
F
.
leaky_relu
(
x_fp16
)
def
gelu
(
x
,
approximate
):
...
...
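The local gelu(x, approximate) reference is also truncated here. GELU's two standard forms are the exact erf-based definition and the tanh approximation, so the helper is presumably along these lines (an assumed reconstruction of the textbook formulas; scipy's erf is already imported at the top of this file):

import numpy as np
from scipy.special import erf

def gelu_sketch(x, approximate):  # hypothetical reconstruction
    if approximate:
        # tanh approximation of GELU
        return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))
    # exact GELU via the Gauss error function
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))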
@@ -2007,20 +2004,19 @@ class TestGELUAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [11, 17])
-            out1 = F.gelu(x)
-            m = paddle.nn.GELU()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = gelu(self.x_np, False)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [11, 17])
+                out1 = F.gelu(x)
+                m = paddle.nn.GELU()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = gelu(self.x_np, False)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.gelu(x)
         m = paddle.nn.GELU()

@@ -2035,28 +2031,28 @@ class TestGELUAPI(unittest.TestCase):
         out_ref = gelu(self.x_np, True)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.gelu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
-            self.assertRaises(TypeError, F.gelu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
-            F.gelu(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.gelu, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
+                self.assertRaises(TypeError, F.gelu, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
+                F.gelu(x_fp16)


 class TestBRelu(TestActivation):
     def setUp(self):
         self.op_type = "brelu"
+        self.python_api = paddle.nn.functional.hardtanh
         self.init_dtype()
         np.random.seed(1024)
@@ -2109,7 +2105,7 @@ class TestRelu6(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestRelu6_ZeroDim(TestRelu6):

@@ -2130,20 +2126,19 @@ class TestRelu6API(unittest.TestCase):
         )

     def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.relu6(x)
-            relu6 = paddle.nn.ReLU6()
-            out2 = relu6(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_relu6(self.x_np)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.relu6(x)
+                relu6 = paddle.nn.ReLU6()
+                out2 = relu6(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_relu6(self.x_np)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out1 = F.relu6(x)
         relu6 = paddle.nn.ReLU6()
@@ -2151,57 +2146,59 @@ class TestRelu6API(unittest.TestCase):
         out_ref = ref_relu6(self.x_np)
         for r in [out1, out2]:
             np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

     def test_fluid_api(self):
-        paddle.enable_static()
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = paddle.nn.functional.relu6(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-        out_ref = ref_relu6(self.x_np)
-        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out = paddle.nn.functional.relu6(x)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            out_ref = ref_relu6(self.x_np)
+            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

     def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.relu6, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, F.relu6, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            F.relu6(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.relu6, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
+                self.assertRaises(TypeError, F.relu6, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
+                F.relu6(x_fp16)


 class TestRelu6APIWarnings(unittest.TestCase):
     def test_warnings(self):
-        with warnings.catch_warnings(record=True) as context:
-            warnings.simplefilter("always")
-            paddle.enable_static()
-            helper = LayerHelper("relu6")
-            data = paddle.static.data(
-                name='data', shape=[None, 3, 32, 32], dtype='float32'
-            )
-            out = helper.create_variable_for_type_inference(dtype=data.dtype)
-            os.environ['FLAGS_print_extra_attrs'] = "1"
-            helper.append_op(
-                type="relu6",
-                inputs={'X': data},
-                outputs={'Out': out},
-                attrs={'threshold': 6.0},
-            )
-            self.assertTrue(
-                "op relu6 use extra_attr: threshold" in str(context[-1].message)
-            )
-            os.environ['FLAGS_print_extra_attrs'] = "0"
+        with paddle_static_guard():
+            with warnings.catch_warnings(record=True) as context:
+                warnings.simplefilter("always")
+                helper = LayerHelper("relu6")
+                data = paddle.static.data(
+                    name='data', shape=[None, 3, 32, 32], dtype='float32'
+                )
+                out = helper.create_variable_for_type_inference(dtype=data.dtype)
+                os.environ['FLAGS_print_extra_attrs'] = "1"
+                helper.append_op(
+                    type="relu6",
+                    inputs={'X': data},
+                    outputs={'Out': out},
+                    attrs={'threshold': 6.0},
+                )
+                self.assertTrue(
+                    "op relu6 use extra_attr: threshold" in str(context[-1].message)
+                )
+                os.environ['FLAGS_print_extra_attrs'] = "0"


 def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
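ref_hardswish's body falls outside the visible hunk. Given the signature, it presumably implements the standard hard-swish form x * min(max(x + offset, 0), threshold) / scale (an assumed reconstruction):

import numpy as np

def ref_hardswish_sketch(x, threshold=6.0, scale=6.0, offset=3.0):  # hypothetical
    return x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale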
@@ -2247,13 +2244,12 @@ class TestHardSwish(TestActivation):
         self.check_grad(
             ['X'],
             'Out',
-            check_eager=True,
             check_prim=True,
             only_check_prim=self.if_only_check_prim(),
         )

     def test_check_output(self):
-        self.check_output(check_eager=True, check_prim=True)
+        self.check_output(check_prim=True)


 class TestHardSwish_ZeroDim(TestHardSwish):

@@ -2276,19 +2272,19 @@ class TestHardswishAPI(unittest.TestCase):
         )

     def test_static_api(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.hardswish(x)
-            m = paddle.nn.Hardswish()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-        out_ref = ref_hardswish(self.x_np)
-        for r in res:
-            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.hardswish(x)
+                m = paddle.nn.Hardswish()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+            out_ref = ref_hardswish(self.x_np)
+            for r in res:
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor([11648.0, 11448.0])
         out1 = F.hardswish(x)
         m = paddle.nn.Hardswish()
@@ -2296,37 +2292,36 @@ class TestHardswishAPI(unittest.TestCase):
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = paddle.nn.functional.hardswish(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_hardswish(self.x_np)
-            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
-        paddle.disable_static(self.place)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out = paddle.nn.functional.hardswish(x)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_hardswish(self.x_np)
+                np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.hardswish, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.hardswish, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.hardswish(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.hardswish, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.hardswish, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.hardswish(x_fp16)
class TestSoftRelu(TestActivation):
...
...
@@ -2365,6 +2360,7 @@ class TestELU(TestActivation):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()
+        self.python_api = paddle.nn.functional.elu

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
...
...
@@ -2414,20 +2410,19 @@ class TestELUAPI(unittest.TestCase):
        self.elu = F.elu

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out1 = self.elu(x)
-            m = paddle.nn.ELU()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = elu(self.x_np, 1.0)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out1 = self.elu(x)
+                m = paddle.nn.ELU()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = elu(self.x_np, 1.0)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
...
...
@@ -2444,23 +2439,22 @@ class TestELUAPI(unittest.TestCase):
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, self.elu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[10, 12], dtype='int32'
-            )
-            self.assertRaises(TypeError, self.elu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[10, 12], dtype='float16'
-            )
-            self.elu(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, self.elu, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[10, 12], dtype='int32'
+                )
+                self.assertRaises(TypeError, self.elu, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[10, 12], dtype='float16'
+                )
+                self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
...
...
@@ -2469,10 +2463,8 @@ class TestELUInplaceAPI(TestELUAPI):
        self.elu = F.elu_

    def test_alpha_error(self):
-        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
-        paddle.enable_static()


def celu(x, alpha):
...
...
@@ -2501,7 +2493,7 @@ class TestCELU(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


class TestCELU_ZeroDim(TestCELU):
...
...
@@ -2525,20 +2517,19 @@ class TestCELUAPI(unittest.TestCase):
        self.celu = F.celu

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out1 = self.celu(x, 1.5)
-            m = paddle.nn.CELU(1.5)
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = celu(self.x_np, 1.5)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out1 = self.celu(x, 1.5)
+                m = paddle.nn.CELU(1.5)
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = celu(self.x_np, 1.5)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
...
...
@@ -2555,28 +2546,27 @@ class TestCELUAPI(unittest.TestCase):
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, self.celu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[10, 12], dtype='int32'
-            )
-            self.assertRaises(TypeError, self.celu, x_int32)
-            # The alpha must be not equal 0
-            x_fp32 = paddle.fluid.data(
-                name='x_fp32', shape=[10, 12], dtype='float32'
-            )
-            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[10, 12], dtype='float16'
-            )
-            self.celu(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, self.celu, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[10, 12], dtype='int32'
+                )
+                self.assertRaises(TypeError, self.celu, x_int32)
+                # The alpha must be not equal 0
+                x_fp32 = paddle.fluid.data(
+                    name='x_fp32', shape=[10, 12], dtype='float32'
+                )
+                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[10, 12], dtype='float16'
+                )
+                self.celu(x_fp16)


class TestReciprocal(TestActivation):
...
...
@@ -2596,10 +2586,10 @@ class TestReciprocal(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)
+        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


class TestReciprocal_ZeroDim(TestReciprocal):
...
...
@@ -2610,7 +2600,6 @@ class TestReciprocal_ZeroDim(TestReciprocal):
class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
-        self.check_eager = True
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.init_dtype()
...
...
@@ -2630,29 +2619,35 @@ class TestLog(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)

    def test_error(self):
-        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
-        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
+        with paddle_static_guard():
+            with paddle_static_guard():
+                in1 = paddle.static.data(
+                    name="in1", shape=[11, 17], dtype="int32"
+                )
+                in2 = paddle.static.data(
+                    name="in2", shape=[11, 17], dtype="int64"
+                )

-        self.assertRaises(TypeError, paddle.log, in1)
-        self.assertRaises(TypeError, paddle.log, in2)
+                self.assertRaises(TypeError, paddle.log, in1)
+                self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
-        paddle.enable_static()
-        with static.program_guard(
-            paddle.static.Program(), paddle.static.Program()
-        ):
-            x = [[2, 3, 4], [7, 8, 9]]
-            x = paddle.to_tensor(x, dtype='float16')
-            out = paddle.log(x)
-            if core.is_compiled_with_cuda():
-                place = paddle.CUDAPlace(0)
-                exe = paddle.static.Executor(place)
-                (res,) = exe.run(fetch_list=[out])
+        with paddle_static_guard():
+            with static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x = [[2, 3, 4], [7, 8, 9]]
+                x = paddle.to_tensor(x, dtype='float16')
+                out = paddle.log(x)
+                if core.is_compiled_with_cuda():
+                    place = paddle.CUDAPlace(0)
+                    exe = paddle.static.Executor(place)
+                    (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
...
...
@@ -2663,7 +2658,6 @@ class TestLog_ZeroDim(TestLog):
class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
-        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()
...
...
@@ -2677,34 +2671,36 @@ class TestLog2(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')

    def test_error(self):
-        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
-        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
+        with paddle_static_guard():
+            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
+            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

-        self.assertRaises(TypeError, paddle.log2, in1)
-        self.assertRaises(TypeError, paddle.log2, in2)
+            self.assertRaises(TypeError, paddle.log2, in1)
+            self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
-        with paddle.static.program_guard(
-            paddle.static.Program(), paddle.static.Program()
-        ):
-            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
-            data_x = paddle.static.data(
-                name="data_x", shape=[11, 17], dtype="float64"
-            )
-            out1 = paddle.log2(data_x)
-            exe = paddle.static.Executor(place=fluid.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-            (res1,) = exe.run(
-                paddle.static.default_main_program(),
-                feed={"data_x": input_x},
-                fetch_list=[out1],
-            )
-            expected_res = np.log2(input_x)
-            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
+                data_x = paddle.static.data(
+                    name="data_x", shape=[11, 17], dtype="float64"
+                )
+                out1 = paddle.log2(data_x)
+                exe = paddle.static.Executor(place=fluid.CPUPlace())
+                exe.run(paddle.static.default_startup_program())
+                (res1,) = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={"data_x": input_x},
+                    fetch_list=[out1],
+                )
+                expected_res = np.log2(input_x)
+                np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
...
...
@@ -2724,7 +2720,6 @@ class TestLog2_ZeroDim(TestLog2):
class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
-        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()
...
...
@@ -2738,7 +2733,7 @@ class TestLog10(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


class TestLog10_ZeroDim(TestLog10):
...
...
@@ -2748,31 +2743,33 @@ class TestLog10_ZeroDim(TestLog10):
class TestLog10API(unittest.TestCase):
    def test_error(self):
-        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
-        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
+        with paddle_static_guard():
+            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
+            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

-        self.assertRaises(TypeError, paddle.log10, in1)
-        self.assertRaises(TypeError, paddle.log10, in2)
+            self.assertRaises(TypeError, paddle.log10, in1)
+            self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
-        with paddle.static.program_guard(
-            paddle.static.Program(), paddle.static.Program()
-        ):
-            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
-            data_x = paddle.static.data(
-                name="data_x", shape=[11, 17], dtype="float64"
-            )
-            out1 = paddle.log10(data_x)
-            exe = paddle.static.Executor(place=paddle.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-            (res1,) = exe.run(
-                paddle.static.default_main_program(),
-                feed={"data_x": input_x},
-                fetch_list=[out1],
-            )
-            expected_res = np.log10(input_x)
-            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
+                data_x = paddle.static.data(
+                    name="data_x", shape=[11, 17], dtype="float64"
+                )
+                out1 = paddle.log10(data_x)
+                exe = paddle.static.Executor(place=paddle.CPUPlace())
+                exe.run(paddle.static.default_startup_program())
+                (res1,) = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={"data_x": input_x},
+                    fetch_list=[out1],
+                )
+                expected_res = np.log10(input_x)
+                np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
...
...
@@ -2787,7 +2784,6 @@ class TestLog10API(unittest.TestCase):
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
-        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()
...
...
@@ -2802,22 +2798,22 @@ class TestLog1p(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
-        paddle.enable_static()
-        with static.program_guard(
-            paddle.static.Program(), paddle.static.Program()
-        ):
-            x = [[2, 3, 4], [7, 8, 9]]
-            x = paddle.to_tensor(x, dtype='float16')
-            out = paddle.log1p(x)
-            if core.is_compiled_with_cuda():
-                place = paddle.CUDAPlace(0)
-                exe = paddle.static.Executor(place)
-                (res,) = exe.run(fetch_list=[out])
+        with paddle_static_guard():
+            with static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x = [[2, 3, 4], [7, 8, 9]]
+                x = paddle.to_tensor(x, dtype='float16')
+                out = paddle.log1p(x)
+                if core.is_compiled_with_cuda():
+                    place = paddle.CUDAPlace(0)
+                    exe = paddle.static.Executor(place)
+                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
...
...
@@ -2827,24 +2823,25 @@ class TestLog1p_ZeroDim(TestLog1p):
class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
-            data_x = paddle.static.data(
-                name="data_x",
-                shape=[11, 17],
-                dtype="float64",
-            )
-            out1 = paddle.log1p(data_x)
-            exe = fluid.Executor(place=fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-            (res1,) = exe.run(
-                fluid.default_main_program(),
-                feed={"data_x": input_x},
-                fetch_list=[out1],
-            )
-            expected_res = np.log1p(input_x)
-            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
+                data_x = paddle.static.data(
+                    name="data_x",
+                    shape=[11, 17],
+                    dtype="float64",
+                )
+                out1 = paddle.log1p(data_x)
+                exe = fluid.Executor(place=fluid.CPUPlace())
+                exe.run(fluid.default_startup_program())
+                (res1,) = exe.run(
+                    fluid.default_main_program(),
+                    feed={"data_x": input_x},
+                    fetch_list=[out1],
+                )
+                expected_res = np.log1p(input_x)
+                np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
...
...
@@ -2873,12 +2870,10 @@ class TestSquare(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.007, check_eager=True
-        )
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


class TestSquare_ZeroDim(TestSquare):
...
...
@@ -2909,20 +2904,17 @@ class TestSquareBF16(OpTest):
    def test_check_output(self):
        place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=True)
+        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
-        self.check_eager = True
        self.init_dtype()
        self.init_shape()
...
...
@@ -2935,12 +2927,12 @@ class TestPow(TestActivation):
        self.outputs = {'Out': out}

    def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
+        self.check_grad(['X'], 'Out')


class TestPow_ZeroDim(TestPow):
...
...
@@ -2951,7 +2943,6 @@ class TestPow_ZeroDim(TestPow):
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
-        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()
...
...
@@ -2961,43 +2952,46 @@ class TestPow_factor_tensor(TestActivation):
        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
-            'FactorTensor': np.array([3.0]).astype("float32"),
+            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
+        self.check_grad(['X'], 'Out')

    def test_api(self):
-        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
-        x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
-        res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")
-
-        factor_1 = 2.0
-        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
-        out_1 = paddle.pow(x, factor_1)
-        out_2 = paddle.pow(x, factor_2)
-        out_4 = paddle.pow(x, factor_1, name='pow_res')
-        out_6 = paddle.pow(x, factor_2)
-        self.assertEqual(('pow_res' in out_4.name), True)
-
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res, res_6 = exe.run(
-            fluid.default_main_program(),
-            feed={"x": input},
-            fetch_list=[out_1, out_2, res, out_6],
-        )
+        with paddle_static_guard():
+            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
+            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
+            res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")
+
+            factor_1 = 2.0
+            factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
+            out_1 = paddle.pow(x, factor_1)
+            out_2 = paddle.pow(x, factor_2)
+            out_4 = paddle.pow(x, factor_1, name='pow_res')
+            out_6 = paddle.pow(x, factor_2)
+            self.assertEqual(('pow_res' in out_4.name), True)
+
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res_1, res_2, res, res_6 = exe.run(
+                fluid.default_main_program(),
+                feed={"x": input},
+                fetch_list=[out_1, out_2, res, out_6],
+            )

-        assert np.allclose(res_1, np.power(input, 2))
-        assert np.allclose(res_2, np.power(input, 3))
-        assert np.allclose(res_6, np.power(input, 3))
+            assert np.allclose(res_1, np.power(input, 2))
+            assert np.allclose(res_2, np.power(input, 3))
+            assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
...
...
@@ -3014,6 +3008,7 @@ class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
+        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()
...
...
@@ -3070,50 +3065,48 @@ class TestSTanhAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12])
-            out = paddle.stanh(x, self.scale_a, self.scale_b)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', [10, 12])
+                out = paddle.stanh(x, self.scale_a, self.scale_b)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_fluid_api(self):
-        paddle.enable_static()
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', [10, 12])
-            out = paddle.stanh(x, self.scale_a, self.scale_b)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
-            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', [10, 12])
+                out = paddle.stanh(x, self.scale_a, self.scale_b)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
+                np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, paddle.stanh, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, paddle.stanh, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            paddle.stanh(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, paddle.stanh, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, paddle.stanh, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
...
...
@@ -3152,17 +3145,13 @@ class TestSoftplus(TestActivation):
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}
-        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        if hasattr(self, 'check_eager'):
-            check_eager = self.check_eager
-        self.check_grad(['X'], 'Out', check_eager=check_eager)
+        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
...
...
@@ -3177,6 +3166,7 @@ class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
+        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15
...
...
@@ -3214,20 +3204,19 @@ class TestSoftplusAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.softplus(x, self.beta, self.threshold)
-            softplus = paddle.nn.Softplus(self.beta, self.threshold)
-            out2 = softplus(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.softplus(x, self.beta, self.threshold)
+                softplus = paddle.nn.Softplus(self.beta, self.threshold)
+                out2 = softplus(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
...
...
@@ -3235,23 +3224,22 @@ class TestSoftplusAPI(unittest.TestCase):
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.softplus, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.softplus, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.softplus(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.softplus, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.softplus, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.softplus(x_fp16)


def ref_softsign(x):
...
...
@@ -3279,7 +3267,7 @@ class TestSoftsign(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
...
...
@@ -3299,20 +3287,19 @@ class TestSoftsignAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.softsign(x)
-            softsign = paddle.nn.Softsign()
-            out2 = softsign(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_softsign(self.x_np)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.softsign(x)
+                softsign = paddle.nn.Softsign()
+                out2 = softsign(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_softsign(self.x_np)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
...
...
@@ -3320,23 +3307,22 @@ class TestSoftsignAPI(unittest.TestCase):
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.softsign, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.softsign, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.softsign(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.softsign, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.softsign, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
...
...
@@ -3349,6 +3335,7 @@ class TestThresholdedRelu(TestActivation):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
+        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15
...
...
@@ -3388,20 +3375,19 @@ class TestThresholdedReluAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.thresholded_relu(x, self.threshold)
-            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
-            out2 = thresholded_relu(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.thresholded_relu(x, self.threshold)
+                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
+                out2 = thresholded_relu(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_thresholded_relu(self.x_np, self.threshold)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
...
...
@@ -3409,23 +3395,22 @@ class TestThresholdedReluAPI(unittest.TestCase):
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.thresholded_relu, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.thresholded_relu(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.thresholded_relu, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
...
...
@@ -3440,6 +3425,7 @@ class TestHardSigmoid(TestActivation):
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
+        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
...
...
@@ -3490,19 +3476,19 @@ class TestHardsigmoidAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.hardsigmoid(x)
-            m = paddle.nn.Hardsigmoid()
-            out2 = m(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_hardsigmoid(self.x_np)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.hardsigmoid(x)
+                m = paddle.nn.Hardsigmoid()
+                out2 = m(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_hardsigmoid(self.x_np)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
...
...
@@ -3510,37 +3496,36 @@ class TestHardsigmoidAPI(unittest.TestCase):
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
-            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
-        paddle.disable_static(self.place)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
+                np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_errors(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.hardsigmoid, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.hardsigmoid(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.hardsigmoid, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.hardsigmoid(x_fp16)


def ref_swish(x):
...
...
@@ -3555,8 +3540,6 @@ class TestSwish(TestActivation):
        self.init_dtype()
        self.init_shape()
-        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
...
...
@@ -3570,10 +3553,10 @@ class TestSwish(TestActivation):
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        check_eager = False
-        if hasattr(self, 'check_eager'):
-            check_eager = self.check_eager
-        self.check_grad(['X'], 'Out', check_eager=check_eager)
+        self.check_grad(
+            ['X'],
+            'Out',
+        )


class TestSwish_ZeroDim(TestSwish):
...
...
@@ -3593,20 +3576,19 @@ class TestSwishAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.swish(x)
-            swish = paddle.nn.Swish()
-            out2 = swish(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_swish(self.x_np)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.swish(x)
+                swish = paddle.nn.Swish()
+                out2 = swish(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_swish(self.x_np)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
...
...
@@ -3614,33 +3596,32 @@ class TestSwishAPI(unittest.TestCase):
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_fluid_api(self):
-        paddle.enable_static()
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = paddle.nn.functional.swish(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_swish(self.x_np)
-            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out = paddle.nn.functional.swish(x)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_swish(self.x_np)
+                np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.swish, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.swish, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.swish(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.swish, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.swish, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
...
...
@@ -3667,12 +3648,12 @@ class TestMish(TestActivation):
        self.shape = [10, 12]

    def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
...
...
@@ -3692,20 +3673,19 @@ class TestMishAPI(unittest.TestCase):
        )

    def test_static_api(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
-            out1 = F.mish(x)
-            mish = paddle.nn.Mish()
-            out2 = mish(x)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_mish(self.x_np)
-            for r in res:
-                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+                out1 = F.mish(x)
+                mish = paddle.nn.Mish()
+                out2 = mish(x)
+                exe = paddle.static.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+                out_ref = ref_mish(self.x_np)
+                for r in res:
+                    np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
...
...
@@ -3713,33 +3693,32 @@ class TestMishAPI(unittest.TestCase):
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
-        paddle.enable_static()

    def test_fluid_api(self):
-        paddle.enable_static()
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = paddle.nn.functional.mish(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_mish(self.x_np)
-            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program()):
+                x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+                out = paddle.nn.functional.mish(x)
+                exe = fluid.Executor(self.place)
+                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+                out_ref = ref_mish(self.x_np)
+                np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(paddle.static.Program()):
-            # The input type must be Variable.
-            self.assertRaises(TypeError, F.mish, 1)
-            # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
-                name='x_int32', shape=[12, 10], dtype='int32'
-            )
-            self.assertRaises(TypeError, F.mish, x_int32)
-            # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
-                name='x_fp16', shape=[12, 10], dtype='float16'
-            )
-            F.mish(x_fp16)
+        with paddle_static_guard():
+            with paddle.static.program_guard(paddle.static.Program()):
+                # The input type must be Variable.
+                self.assertRaises(TypeError, F.mish, 1)
+                # The input dtype must be float16, float32, float64.
+                x_int32 = paddle.fluid.data(
+                    name='x_int32', shape=[12, 10], dtype='int32'
+                )
+                self.assertRaises(TypeError, F.mish, x_int32)
+                # support the input dtype is float16
+                x_fp16 = paddle.fluid.data(
+                    name='x_fp16', shape=[12, 10], dtype='float16'
+                )
+                F.mish(x_fp16)
# ------------------ Test Cudnn Activation----------------------
...
...
python/paddle/fluid/tests/unittests/test_atan2_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle
import paddle.fluid.core as core
...
...
@@ -44,10 +44,10 @@ class TestAtan2(OpTest):
        self.outputs = {'Out': out}

    def test_check_grad(self):
-        self.check_grad(['X1', 'X2'], 'Out', check_eager=True)
+        self.check_grad(['X1', 'X2'], 'Out')

    def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

    def init_dtype(self):
        self.dtype = np.float64
...
...
@@ -67,7 +67,6 @@ class TestAtan2_float(TestAtan2):
                    self.inputs['X2'],
                    1 / self.inputs['X1'].size,
                ),
-                check_eager=True,
            )
...
...
python/paddle/fluid/tests/unittests/test_attention_lstm_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

from test_fusion_lstm_op import ACTIVATION, fc
from test_softmax_op import stable_softmax
...
...
python/paddle/fluid/tests/unittests/test_auc_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle
import paddle.fluid as fluid
...
...
@@ -65,7 +65,7 @@ class TestAucOp(OpTest):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


class TestGlobalAucOp(OpTest):
...
...
@@ -105,7 +105,7 @@ class TestGlobalAucOp(OpTest):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


class TestAucAPI(unittest.TestCase):
...
...
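The AUC tests move in the opposite direction: instead of dropping a flag they add check_dygraph=False. A plausible reason — an inference, not stated anywhere in this diff — is that these ops thread batch/global statistics through auxiliary inputs and outputs that have no dygraph counterpart, so only the static path is verified. The resulting shape, sketched with details elided:

from eager_op_test import OpTest


class TestStaticOnlyOp(OpTest):  # hypothetical illustration
    def setUp(self):
        self.op_type = "auc"
        ...  # stat inputs/outputs wired up as in TestAucOp above

    def test_check_output(self):
        # opt out of the (assumed) default dygraph check rather than opt in
        self.check_output(check_dygraph=False)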
python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

from paddle.fluid import metrics
...
...
@@ -66,7 +66,7 @@ class TestAucSinglePredOp(OpTest):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


class TestAucGlobalSinglePredOp(OpTest):
...
...
@@ -109,7 +109,7 @@ class TestAucGlobalSinglePredOp(OpTest):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


if __name__ == "__main__":
...
...
python/paddle/fluid/tests/unittests/test_batch_fc_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle.fluid.core as core
...
...
python/paddle/fluid/tests/unittests/test_batch_norm_op.py
...
...
@@ -16,7 +16,7 @@ import os
import unittest

import numpy as np
-from op_test import OpTest, _set_use_system_allocator
+from eager_op_test import OpTest, _set_use_system_allocator

import paddle
import paddle.fluid as fluid
...
...
python/paddle/fluid/tests/unittests/test_bernoulli_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle
...
...
python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle
import paddle.fluid as fluid
...
...
@@ -135,8 +135,6 @@ class TestBicubicInterpOp(OpTest):
        self.init_test_case()
        self.op_type = "bicubic_interp"
-        # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
        input_np = np.random.random(self.input_shape).astype("float64")

        if self.data_layout == "NCHW":
...
...
@@ -165,10 +163,8 @@ class TestBicubicInterpOp(OpTest):
        self.inputs = {'X': input_np}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
        if self.actual_shape is not None:
            self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
        self.attrs = {
            'out_h': self.out_h,
...
...
@@ -181,12 +177,10 @@ class TestBicubicInterpOp(OpTest):
        self.outputs = {'Out': output_np}

    def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()

    def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        self.interp_method = 'bicubic'
...
...
python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py
...
...
@@ -15,7 +15,7 @@
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

import paddle
import paddle.fluid as fluid
...
...
@@ -186,8 +186,6 @@ class TestBicubicInterpOp(OpTest):
        self.init_test_case()
        self.op_type = "bicubic_interp_v2"
-        # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
        input_np = np.random.random(self.input_shape).astype("float64")
        scale_h = 0
        scale_w = 0
...
...
@@ -227,10 +225,8 @@ class TestBicubicInterpOp(OpTest):
        self.inputs = {'X': input_np}
        if self.out_size is not None:
            self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
        if self.actual_shape is not None:
            self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
        self.attrs = {
            'out_h': self.out_h,
...
...
@@ -249,12 +245,10 @@ class TestBicubicInterpOp(OpTest):
        self.outputs = {'Out': output_np}

    def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output()

    def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True)

    def init_test_case(self):
        self.interp_method = 'bicubic'
...
...
python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py
...
...
@@ -16,7 +16,7 @@ import math
import unittest

import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

import paddle
...
...
@@ -192,26 +192,27 @@ class TestBilateralSliceOp1(TestBilateralSliceOp):
class TestBilateralSliceApi(unittest.TestCase):
    def test_api(self):
-        x = paddle.fluid.data(name='x', shape=[None, 3, 25, 15], dtype='float32')
-        guide = paddle.fluid.data(name='guide', shape=[None, 25, 15], dtype='float32')
-        grid = paddle.fluid.data(
-            name='grid', shape=[None, None, 8, 5, 3], dtype='float32'
-        )
-        paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False)
-
-        if not paddle.fluid.is_compiled_with_cuda():
-            return
-
-        with paddle.fluid.dygraph.guard():
-            x1 = paddle.rand([3, 1, 50, 30])
-            guide1 = paddle.rand([3, 50, 30])
-            grid1 = paddle.rand([3, 2, 2, 5, 3])
-
-            paddle.fluid.contrib.bilateral_slice(x1, guide1, grid1, False)
+        with paddle_static_guard():
+            x = paddle.fluid.data(
+                name='x', shape=[None, 3, 25, 15], dtype='float32'
+            )
+            guide = paddle.fluid.data(
+                name='guide', shape=[None, 25, 15], dtype='float32'
+            )
+            grid = paddle.fluid.data(
+                name='grid', shape=[None, None, 8, 5, 3], dtype='float32'
+            )
+            paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False)
+
+            if not paddle.fluid.is_compiled_with_cuda():
+                return
+
+            with paddle.fluid.dygraph.guard():
+                x1 = paddle.rand([3, 1, 50, 30])
+                guide1 = paddle.rand([3, 50, 30])
+                grid1 = paddle.rand([3, 2, 2, 5, 3])
+
+                paddle.fluid.contrib.bilateral_slice(x1, guide1, grid1, False)


if __name__ == "__main__":
...
...
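Several of the rewritten tests wrap their static-graph setup in paddle_static_guard, a helper imported from eager_op_test in the same hunk. What follows is a minimal sketch of what such a guard does, assuming it is a plain context manager around paddle.enable_static(); the actual helper in eager_op_test may differ in detail, so the name static_guard_sketch is used here to avoid claiming it is the real implementation.

from contextlib import contextmanager

import paddle


@contextmanager
def static_guard_sketch():
    # Run the body of the `with` block in static-graph mode,
    # then restore dynamic (eager) mode no matter what happens.
    try:
        paddle.enable_static()
        yield
    finally:
        paddle.disable_static()


# Usage mirrors the rewritten tests:
with static_guard_sketch():
    x = paddle.static.data(name='x', shape=[None, 3, 25, 15], dtype='float32')

This keeps eager mode as the process-wide default, which is the point of the migration: only the code that genuinely needs a static program opts into it, and only for the duration of the block.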
python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py (view file @ e6ca78c2)
...
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
...
...
@@ -219,10 +219,10 @@ class TestBilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)

     def init_test_case(self):
         self.interp_method = 'bilinear'
...
...
@@ -409,9 +409,7 @@ class TestBilinearInterpOpUint8(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=True
-        )
+        self.check_output_with_place(place=core.CPUPlace(), atol=1)

     def init_test_case(self):
         self.interp_method = 'bilinear'
...
...
@@ -585,10 +583,10 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
+        self.check_grad(['X'], 'Out', in_place=True)

     def init_test_case(self):
         self.interp_method = 'bilinear'
...
...
python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py (view file @ e6ca78c2)
...
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 import paddle.fluid as fluid
...
...
@@ -23,28 +23,29 @@ import paddle.fluid as fluid
 class TestDygraphBilinearTensorProductAPIError(unittest.TestCase):
     def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            layer = paddle.nn.Bilinear(5, 4, 1000)
-            # the input must be Variable.
-            x0 = fluid.create_lod_tensor(
-                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
-            )
-            self.assertRaises(TypeError, layer, x0)
-            # the input dtype must be float32 or float64
-            x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16")
-            x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32")
-            self.assertRaises(TypeError, layer, x1, x2)
-            # the dimensions of x and y must be 2
-            paddle.enable_static()
-            x3 = paddle.static.data("", shape=[0], dtype="float32")
-            x4 = paddle.static.data("", shape=[0], dtype="float32")
-            self.assertRaises(
-                ValueError,
-                paddle.static.nn.bilinear_tensor_product,
-                x3,
-                x4,
-                1000,
-            )
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                layer = paddle.nn.Bilinear(5, 4, 1000)
+                # the input must be Variable.
+                x0 = fluid.create_lod_tensor(
+                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
+                )
+                self.assertRaises(TypeError, layer, x0)
+                # the input dtype must be float32 or float64
+                x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16")
+                x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32")
+                self.assertRaises(TypeError, layer, x1, x2)
+                # the dimensions of x and y must be 2
+                paddle.enable_static()
+                x3 = paddle.static.data("", shape=[0], dtype="float32")
+                x4 = paddle.static.data("", shape=[0], dtype="float32")
+                self.assertRaises(
+                    ValueError,
+                    paddle.static.nn.bilinear_tensor_product,
+                    x3,
+                    x4,
+                    1000,
+                )


 class TestBilinearTensorProductOp(OpTest):
...
...
@@ -73,10 +74,10 @@ class TestBilinearTensorProductOp(OpTest):
         self.outputs = {'Out': output + bias}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out')


 if __name__ == "__main__":
...
...
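For context on what TestBilinearTensorProductOp verifies: a bilinear tensor product maps a pair of vectors through a 3-D weight, out[b, c] = x[b] · W[c] · y[b] + bias[c]. A standalone numpy sketch of that reference computation follows; the sizes are illustrative, not the ones the test uses.

import numpy as np

batch, m, n, k = 6, 5, 4, 10  # illustrative sizes
x = np.random.random((batch, m))
y = np.random.random((batch, n))
w = np.random.random((k, m, n))
bias = np.random.random((1, k))

# out[b, c] = sum over i, j of x[b, i] * w[c, i, j] * y[b, j], plus bias[c]
out = np.einsum('bi,cij,bj->bc', x, w, y) + bias
assert out.shape == (batch, k)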
python/paddle/fluid/tests/unittests/test_bincount_op.py (view file @ e6ca78c2)
...
...
@@ -17,7 +17,7 @@ import tempfile
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
...
...
@@ -150,7 +150,7 @@ class TestBincountOp(OpTest):
         self.Out = np.bincount(self.np_input, minlength=self.minlength)

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()


 class TestCase1(TestBincountOp):
...
...
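Note that TestBincountOp previously passed check_eager=False explicitly; after the migration the flag is removed rather than flipped, since eager_op_test owns that decision. The numpy reference the test compares against is ordinary np.bincount, for example:

import numpy as np

x = np.array([0, 1, 1, 3])
# minlength pads the histogram with trailing zero counts.
print(np.bincount(x, minlength=6))  # -> [1 2 0 1 0 0]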
python/paddle/fluid/tests/unittests/test_bipartite_match_op.py (view file @ e6ca78c2)
...
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def bipartite_match(distance, match_indices, match_dist):
...
...
python/paddle/fluid/tests/unittests/test_bmm_op.py (view file @ e6ca78c2)
...
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 import paddle.fluid as fluid
...
...
@@ -32,31 +32,33 @@ class TestBmmOp(OpTest):
         self.outputs = {'Out': Out}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_checkout_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')


 class API_TestBmm(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = paddle.static.data('data1', shape=[-1, 3, 4], dtype='float64')
-            data2 = paddle.static.data('data2', shape=[-1, 4, 5], dtype='float64')
-            result_bmm = paddle.bmm(data1, data2)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input1 = np.random.random([10, 3, 4]).astype('float64')
-            input2 = np.random.random([10, 4, 5]).astype('float64')
-            (result,) = exe.run(
-                feed={"data1": input1, "data2": input2}, fetch_list=[result_bmm]
-            )
-            expected_result = np.matmul(input1, input2)
-            np.testing.assert_allclose(expected_result, result, rtol=1e-05)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                data1 = paddle.static.data('data1', shape=[-1, 3, 4], dtype='float64')
+                data2 = paddle.static.data('data2', shape=[-1, 4, 5], dtype='float64')
+                result_bmm = paddle.bmm(data1, data2)
+                place = fluid.CPUPlace()
+                exe = fluid.Executor(place)
+                input1 = np.random.random([10, 3, 4]).astype('float64')
+                input2 = np.random.random([10, 4, 5]).astype('float64')
+                (result,) = exe.run(
+                    feed={"data1": input1, "data2": input2},
+                    fetch_list=[result_bmm],
+                )
+                expected_result = np.matmul(input1, input2)
+                np.testing.assert_allclose(expected_result, result, rtol=1e-05)


 class API_TestDygraphBmm(unittest.TestCase):
...
...
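The rewritten API_TestBmm still builds its expected values with np.matmul, which performs the same batched matrix product as paddle.bmm when both operands carry a leading batch dimension. A standalone sketch of just that reference computation:

import numpy as np

input1 = np.random.random([10, 3, 4]).astype('float64')
input2 = np.random.random([10, 4, 5]).astype('float64')

# np.matmul maps over the leading batch axis: [10, 3, 4] @ [10, 4, 5] -> [10, 3, 5]
expected = np.matmul(input1, input2)
assert expected.shape == (10, 3, 5)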
python/paddle/fluid/tests/unittests/test_box_clip_op.py (view file @ e6ca78c2)
...
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def box_clip(input_box, im_info, output_box):
...
...