PaddlePaddle / Paddle · Commit 6d0fa6f2
Unverified commit 6d0fa6f2, authored Mar 28, 2023 by wanghuancoder, committed by GitHub on Mar 28, 2023.
Del old dygraph optest7 (#51999)
* delete old dygraph op test
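The change is mechanical across the 69 files below: tests drop the legacy `op_test` harness in favor of `eager_op_test`, the `check_eager=...` flags disappear from `check_output`/`check_grad` calls (the new harness checks eager mode by default, and legacy-graph-only call sites switch to `check_dygraph=False`), and static-graph test bodies get wrapped in `paddle_static_guard()`. A rough sketch of the rewrite as a script — the regexes and the `migrate` helper are illustrative, not part of the commit:

    import re

    def migrate(source: str) -> str:
        """Approximate the mechanical rewrites this commit applies to a test file."""
        # Legacy imports move to the eager test harness.
        source = re.sub(r"\bfrom op_test import\b", "from eager_op_test import", source)
        source = re.sub(r"^import op_test$", "import eager_op_test", source, flags=re.M)
        source = re.sub(r"(?<!eager_)op_test\.OpTest", "eager_op_test.OpTest", source)
        # Eager checking is the default now, so the opt-in flag is dropped ...
        source = re.sub(r",?\s*check_eager=True", "", source)
        # ... and call sites that must skip dygraph checking say so explicitly.
        source = source.replace("check_eager=False", "check_dygraph=False")
        return source

    print(migrate("from op_test import OpTest\nself.check_output(check_eager=True)\n"))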
Parent: 789aac8a
Showing 69 changed files with 1,396 additions and 943 deletions (+1396 −943):
paddle/fluid/pybind/eager_generator.h (+3 −0)
python/paddle/fluid/tests/unittests/fft/test_spectral_op.py (+3 −6)
python/paddle/fluid/tests/unittests/prim_op_test.py (+2 −0)
python/paddle/fluid/tests/unittests/test_accuracy_op.py (+49 −50)
python/paddle/fluid/tests/unittests/test_assign_op.py (+7 −7)
python/paddle/fluid/tests/unittests/test_assign_pos_op.py (+2 −2)
python/paddle/fluid/tests/unittests/test_assign_value_op.py (+40 −29)
python/paddle/fluid/tests/unittests/test_compare_op.py (+5 −5)
python/paddle/fluid/tests/unittests/test_compare_reduce_op.py (+5 −5)
python/paddle/fluid/tests/unittests/test_complex_op.py (+2 −5)
python/paddle/fluid/tests/unittests/test_concat_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py (+5 −3)
python/paddle/fluid/tests/unittests/test_conv2d_op.py (+29 −1)
python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py (+46 −0)
python/paddle/fluid/tests/unittests/test_cumsum_op.py (+9 −5)
python/paddle/fluid/tests/unittests/test_elementwise_max_op.py (+14 −16)
python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py (+18 −21)
python/paddle/fluid/tests/unittests/test_exponential_op.py (+3 −4)
python/paddle/fluid/tests/unittests/test_fake_quantize_op.py (+12 −12)
python/paddle/fluid/tests/unittests/test_gather_nd_op.py (+29 −43)
python/paddle/fluid/tests/unittests/test_gaussian_random_op.py (+73 −77)
python/paddle/fluid/tests/unittests/test_group_norm_op.py (+87 −57)
python/paddle/fluid/tests/unittests/test_gru_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_gru_rnn_op.py (+37 −1)
python/paddle/fluid/tests/unittests/test_hash_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_index_select_op.py (+5 −5)
python/paddle/fluid/tests/unittests/test_label_smooth_op.py (+3 −3)
python/paddle/fluid/tests/unittests/test_log_softmax.py (+4 −1)
python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py (+8 −1)
python/paddle/fluid/tests/unittests/test_logit_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_lookup_table_op.py (+39 −29)
python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py (+6 −1)
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py (+3 −2)
python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py (+1 −1)
python/paddle/fluid/tests/unittests/test_matrix_rank_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_momentum_op.py (+33 −2)
python/paddle/fluid/tests/unittests/test_nce.py (+152 −135)
python/paddle/fluid/tests/unittests/test_nn_grad.py (+2 −2)
python/paddle/fluid/tests/unittests/test_norm_op.py (+1 −2)
python/paddle/fluid/tests/unittests/test_number_count_op.py (+2 −2)
python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py (+8 −1)
python/paddle/fluid/tests/unittests/test_pool2d_op.py (+68 −1)
python/paddle/fluid/tests/unittests/test_pool3d_op.py (+67 −1)
python/paddle/fluid/tests/unittests/test_pool_max_op.py (+35 −1)
python/paddle/fluid/tests/unittests/test_precision_recall_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_prelu_op.py (+6 −13)
python/paddle/fluid/tests/unittests/test_rank_attention_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_rank_loss_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_reduce_op.py (+106 −52)
python/paddle/fluid/tests/unittests/test_rnn_op.py (+2 −2)
python/paddle/fluid/tests/unittests/test_roll_op.py (+6 −6)
python/paddle/fluid/tests/unittests/test_scale_op.py (+8 −9)
python/paddle/fluid/tests/unittests/test_sgd_op.py (+11 −1)
python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py (+1 −1)
python/paddle/fluid/tests/unittests/test_simple_rnn_op.py (+2 −2)
python/paddle/fluid/tests/unittests/test_slice_op.py (+167 −205)
python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py (+3 −1)
python/paddle/fluid/tests/unittests/test_stft_op.py (+3 −3)
python/paddle/fluid/tests/unittests/test_strided_slice_op.py (+11 −4)
python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_temporal_shift_op.py (+6 −2)
python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py (+10 −6)
python/paddle/fluid/tests/unittests/test_unique.py (+62 −65)
python/paddle/fluid/tests/unittests/test_unique_with_counts.py (+8 −9)
python/paddle/fluid/tests/unittests/test_warpctc_op.py (+21 −6)
python/paddle/fluid/tests/unittests/test_where_op.py (+5 −5)
python/paddle/fluid/tests/unittests/test_yolo_box_op.py (+27 −1)
python/paddle/tensor/manipulation.py (+4 −1)
paddle/fluid/pybind/eager_generator.h (+3 −0)

@@ -28,6 +28,8 @@
 // need to manually specify them in this map.
 std::map<std::string, std::set<std::string>> op_ins_map = {
     {"fc", {"Input", "W", "Bias"}},
     {"precision_recall",
      {"MaxProbs", "Indices", "Labels", "Weights", "StatesInfo"}},
     {"layer_norm", {"X", "Scale", "Bias"}},
+    {"conv2d_fusion_cutlass", {"Input", "Filter", "Bias", "ResidualData"}},
+    {"conv2d_fusion", {"Input", "Filter", "Bias", "ResidualData"}},

@@ -279,6 +281,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
 // functions. While, for very few OPs, the dispensable outputs are used, we
 // need to manually specify them in this map.
 std::map<std::string, std::set<std::string>> op_outs_map = {
     {"rank_attention", {"InputHelp", "Out", "InsRank"}},
     {"fake_quantize_dequantize_moving_average_abs_max",
      {"Out", "OutScale", "OutAccum", "OutState"}},
     {"batch_norm",
...
python/paddle/fluid/tests/unittests/fft/test_spectral_op.py (+3 −6)

@@ -163,7 +163,7 @@ class TestFFTC2COp(OpTest):
     )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -171,7 +171,6 @@ class TestFFTC2COp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )

@@ -260,7 +259,7 @@ class TestFFTC2ROp(OpTest):
     )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -268,7 +267,6 @@ class TestFFTC2ROp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )

@@ -345,7 +343,7 @@ class TestFFTR2COp(OpTest):
     )

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
@@ -353,5 +351,4 @@ class TestFFTR2COp(OpTest):
             "Out",
             user_defined_grads=[self.x_grad],
             user_defined_grad_outputs=[self.out_grad],
-            check_eager=True,
         )
python/paddle/fluid/tests/unittests/prim_op_test.py (+2 −0)

@@ -169,6 +169,8 @@ class OpTestUtils:
         for idx, arg_name in enumerate(api_params):
             if arg_name in api_ignore_param_list:
                 results.append(get_default(idx, api_defaults))
+                if idx_of_op_proto_arguments < len(input_arguments):
+                    idx_of_op_proto_arguments += 1
             else:
                 if idx_of_op_proto_arguments < len(input_arguments):
                     tmp = input_arguments[idx_of_op_proto_arguments]
...
python/paddle/fluid/tests/unittests/test_accuracy_op.py (+49 −50)

@@ -15,12 +15,12 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 from paddle import fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
+from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16


 def accuracy_wrapper(infer, indices, label):

@@ -109,31 +109,30 @@ class TestAccuracyOpBf16(OpTest):
 class TestAccuracyOpError(unittest.TestCase):
     def test_type_errors(self):
-        with program_guard(Program(), Program()):
-            # The input type of accuracy_op must be Variable.
-            x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace()
-            )
-            label = paddle.static.data(
-                name='label', shape=[-1, 1], dtype="int32"
-            )
-            self.assertRaises(TypeError, paddle.static.accuracy, x1, label)
-            self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)
-            # The input dtype of accuracy_op must be float32 or float64.
-            x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32")
-            self.assertRaises(TypeError, paddle.static.accuracy, x2, label)
-            self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)
-            x3 = paddle.static.data(name='input', shape=[-1, 2], dtype="float16")
-            paddle.static.accuracy(input=x3, label=label)
-            paddle.metric.accuracy(input=x3, label=label)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                # The input type of accuracy_op must be Variable.
+                x1 = fluid.create_lod_tensor(
+                    np.array([[-1]]), [[1]], fluid.CPUPlace()
+                )
+                label = paddle.static.data(
+                    name='label', shape=[-1, 1], dtype="int32"
+                )
+                self.assertRaises(TypeError, paddle.static.accuracy, x1, label)
+                self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)
+                # The input dtype of accuracy_op must be float32 or float64.
+                x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32")
+                self.assertRaises(TypeError, paddle.static.accuracy, x2, label)
+                self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)
+                x3 = paddle.static.data(
+                    name='input', shape=[-1, 2], dtype="float16"
+                )
+                paddle.static.accuracy(input=x3, label=label)
+                paddle.metric.accuracy(input=x3, label=label)

     def test_value_errors(self):
         with program_guard(Program(), Program()):
             paddle.disable_static()
             # The input rank of accuracy_op must be 2.
             with self.assertRaises(ValueError):
                 x3 = paddle.to_tensor([0.1], dtype='float32')

@@ -142,37 +141,37 @@ class TestAccuracyOpError(unittest.TestCase):
             )
             paddle.metric.accuracy(x3, label3)
             paddle.enable_static()


 class TestAccuracyAPI1(unittest.TestCase):
     def setUp(self):
-        self.predictions = paddle.static.data(
-            shape=[2, 5], name="predictions", dtype="float32"
-        )
-        self.label = paddle.static.data(
-            shape=[2, 1], name="labels", dtype="int64"
-        )
-        self.result = paddle.static.accuracy(
-            input=self.predictions, label=self.label, k=1
-        )
-        self.input_predictions = np.array(
-            [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
-            dtype="float32",
-        )
-        self.input_labels = np.array([[2], [0]], dtype="int64")
-        self.expect_value = np.array([0.5], dtype='float32')
+        with paddle_static_guard():
+            self.predictions = paddle.static.data(
+                shape=[2, 5], name="predictions", dtype="float32"
+            )
+            self.label = paddle.static.data(
+                shape=[2, 1], name="labels", dtype="int64"
+            )
+            self.result = paddle.static.accuracy(
+                input=self.predictions, label=self.label, k=1
+            )
+            self.input_predictions = np.array(
+                [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+                dtype="float32",
+            )
+            self.input_labels = np.array([[2], [0]], dtype="int64")
+            self.expect_value = np.array([0.5], dtype='float32')

     def test_api(self):
-        exe = paddle.static.Executor()
-        (result,) = exe.run(
-            feed={
-                "predictions": self.input_predictions,
-                'labels': self.input_labels,
-            },
-            fetch_list=[self.result.name],
-        )
-        self.assertEqual((result == self.expect_value).all(), True)
+        with paddle_static_guard():
+            exe = paddle.static.Executor()
+            (result,) = exe.run(
+                feed={
+                    "predictions": self.input_predictions,
+                    'labels': self.input_labels,
+                },
+                fetch_list=[self.result.name],
+            )
+            self.assertEqual((result == self.expect_value).all(), True)


 class TestAccuracyAPI2(unittest.TestCase):
...
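`paddle_static_guard()` does most of the work in this file: it scopes Paddle's global static-graph switch to a `with` block, so a static-only test cannot leave the process in static mode for the eager tests that run after it. A stand-in sketch of that behavior (the real helper lives in `eager_op_test` and may differ in detail):

    import contextlib

    import paddle

    @contextlib.contextmanager
    def static_guard():
        # Hypothetical stand-in for eager_op_test.paddle_static_guard():
        # enter static-graph mode for the body, then restore dynamic mode.
        paddle.enable_static()
        try:
            yield
        finally:
            paddle.disable_static()

    with static_guard():
        # Static-graph APIs are only legal while the guard is active.
        x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
    assert paddle.in_dynamic_mode()  # back to eager after the block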
python/paddle/fluid/tests/unittests/test_assign_op.py (+7 −7)

@@ -14,9 +14,9 @@
 import unittest

+import eager_op_test
 import gradient_checker
 import numpy as np
-import op_test
 from decorator_helper import prog_scope

 import paddle

@@ -25,7 +25,7 @@ from paddle.fluid import Program, core, program_guard
 from paddle.fluid.backward import append_backward


-class TestAssignOp(op_test.OpTest):
+class TestAssignOp(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
         self.public_python_api = paddle.assign

@@ -38,16 +38,16 @@ class TestAssignOp(op_test.OpTest):
     def test_forward(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
         paddle.disable_static()


-class TestAssignFP16Op(op_test.OpTest):
+class TestAssignFP16Op(eager_op_test.OpTest):
     def setUp(self):
         self.python_api = paddle.assign
         self.public_python_api = paddle.assign

@@ -60,12 +60,12 @@ class TestAssignFP16Op(op_test.OpTest):
     def test_forward(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_backward(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)
         paddle.disable_static()
...
python/paddle/fluid/tests/unittests/test_assign_pos_op.py (+2 −2)

@@ -14,8 +14,8 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle
 from paddle.distributed.models.moe import utils

@@ -72,7 +72,7 @@ def get_redefined_allclose(cum_count):
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestAssignPosOpInt64(op_test.OpTest):
+class TestAssignPosOpInt64(eager_op_test.OpTest):
     def setUp(self):
         x = np.random.randint(0, 16, size=(100, 2)).astype("int64")
         y = count(x, 16)
...
python/paddle/fluid/tests/unittests/test_assign_value_op.py (+40 −29)

@@ -14,19 +14,27 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle
 from paddle import fluid
 from paddle.fluid import framework

 paddle.enable_static()


+def assign_value_wrapper(
+    shape=[], dtype=fluid.core.VarDesc.VarType.FP32, values=0.0
+):
+    tensor = paddle.Tensor()
+    return paddle._C_ops.assign_value_(
+        tensor, shape, dtype, values, framework._current_expected_place()
+    )


-class TestAssignValueOp(op_test.OpTest):
+class TestAssignValueOp(eager_op_test.OpTest):
     def setUp(self):
         self.op_type = "assign_value"
+        self.python_api = assign_value_wrapper
         self.inputs = {}
         self.attrs = {}
         self.init_data()

@@ -66,29 +74,31 @@ class TestAssignValueOp4(TestAssignValueOp):
 class TestAssignApi(unittest.TestCase):
     def setUp(self):
-        self.init_dtype()
-        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
-            self.dtype
-        )
-        self.place = (
-            fluid.CUDAPlace(0)
-            if fluid.is_compiled_with_cuda()
-            else fluid.CPUPlace()
-        )
+        with eager_op_test.paddle_static_guard():
+            self.init_dtype()
+            self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
+                self.dtype
+            )
+            self.place = (
+                fluid.CUDAPlace(0)
+                if fluid.is_compiled_with_cuda()
+                else fluid.CPUPlace()
+            )

     def init_dtype(self):
         self.dtype = "float32"

     def test_assign(self):
-        main_program = fluid.Program()
-        with fluid.program_guard(main_program):
-            x = paddle.tensor.create_tensor(dtype=self.dtype)
-            paddle.assign(self.value, output=x)
-
-        exe = fluid.Executor(self.place)
-        [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        np.testing.assert_array_equal(fetched_x, self.value)
-        self.assertEqual(fetched_x.dtype, self.value.dtype)
+        with eager_op_test.paddle_static_guard():
+            main_program = fluid.Program()
+            with fluid.program_guard(main_program):
+                x = paddle.tensor.create_tensor(dtype=self.dtype)
+                paddle.assign(self.value, output=x)
+
+            exe = fluid.Executor(self.place)
+            [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
+            np.testing.assert_array_equal(fetched_x, self.value)
+            self.assertEqual(fetched_x.dtype, self.value.dtype)


 class TestAssignApi2(TestAssignApi):

@@ -103,15 +113,16 @@ class TestAssignApi3(TestAssignApi):
 class TestAssignApi4(TestAssignApi):
     def setUp(self):
-        self.init_dtype()
-        self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
-            np.bool_
-        )
-        self.place = (
-            fluid.CUDAPlace(0)
-            if fluid.is_compiled_with_cuda()
-            else fluid.CPUPlace()
-        )
+        with eager_op_test.paddle_static_guard():
+            self.init_dtype()
+            self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
+                np.bool_
+            )
+            self.place = (
+                fluid.CUDAPlace(0)
+                if fluid.is_compiled_with_cuda()
+                else fluid.CPUPlace()
+            )

     def init_dtype(self):
         self.dtype = "bool"
...
python/paddle/fluid/tests/unittests/test_compare_op.py (+5 −5)

@@ -14,9 +14,9 @@
 import unittest

+import eager_op_test
 import numpy
 import numpy as np
-import op_test

 import paddle
 from paddle import fluid

@@ -24,7 +24,7 @@ from paddle.fluid import Program, core, program_guard

 def create_test_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             a = numpy.random.random(size=(10, 7)).astype(typename)
             b = numpy.random.random(size=(10, 7)).astype(typename)

@@ -444,7 +444,7 @@ create_paddle_case('not_equal', lambda _a, _b: _a != _b)
 # add bf16 tests
 def create_bf16_case(op_type, callback):
-    class TestCompareOpBF16Op(op_test.OpTest):
+    class TestCompareOpBF16Op(eager_op_test.OpTest):
         def setUp(self):
             self.op_type = op_type
             self.dtype = np.uint16

@@ -454,8 +454,8 @@ def create_bf16_case(op_type, callback):
             y = np.random.uniform(0, 1, [5, 5]).astype(np.float32)
             real_result = callback(x, y)
             self.inputs = {
-                'X': op_test.convert_float_to_uint16(x),
-                'Y': op_test.convert_float_to_uint16(y),
+                'X': eager_op_test.convert_float_to_uint16(x),
+                'Y': eager_op_test.convert_float_to_uint16(y),
             }
             self.outputs = {'Out': real_result}
...
python/paddle/fluid/tests/unittests/test_compare_reduce_op.py (+5 −5)

@@ -14,14 +14,14 @@
 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle


 def create_test_not_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10, 7)).astype(typename)

@@ -40,7 +40,7 @@ def create_test_not_equal_class(op_type, typename, callback):
 def create_test_not_shape_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10)).astype(typename)

@@ -59,7 +59,7 @@ def create_test_not_shape_equal_class(op_type, typename, callback):
 def create_test_equal_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = y = np.random.random(size=(10, 7)).astype(typename)
             z = callback(x, y)

@@ -77,7 +77,7 @@ def create_test_equal_class(op_type, typename, callback):
 def create_test_dim1_class(op_type, typename, callback):
-    class Cls(op_test.OpTest):
+    class Cls(eager_op_test.OpTest):
         def setUp(self):
             x = y = np.random.random(size=(1)).astype(typename)
             x = np.array([True, False, True]).astype(typename)
...
python/paddle/fluid/tests/unittests/test_complex_op.py (+2 −5)

@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle import static

@@ -67,7 +67,7 @@ class TestComplexOp(OpTest):
         self.outputs = {'Out': out_ref}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         dout = self.out_grad

@@ -79,7 +79,6 @@ class TestComplexOp(OpTest):
             'Out',
             user_defined_grads=[dx, dy],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
         )

     def test_check_grad_ignore_x(self):

@@ -95,7 +94,6 @@ class TestComplexOp(OpTest):
             no_grad_set=set('X'),
             user_defined_grads=[dy],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
         )

     def test_check_grad_ignore_y(self):

@@ -109,7 +107,6 @@ class TestComplexOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[dx],
             user_defined_grad_outputs=[dout],
-            check_eager=True,
         )
...
python/paddle/fluid/tests/unittests/test_concat_op.py (+1 −1)

@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle import fluid
...
...
python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py
浏览文件 @
6d0fa6f2
...
...
@@ -15,7 +15,7 @@
import
unittest
import
numpy
as
np
from
op_test
import
OpTest
from
eager_
op_test
import
OpTest
from
test_conv2d_op
import
conv2d_forward_naive
from
paddle.fluid
import
core
...
...
@@ -60,7 +60,9 @@ def create_test_cudnn_channel_last_class(parent):
print
(
self
.
attrs
)
if
self
.
has_cuda
():
place
=
core
.
CUDAPlace
(
0
)
self
.
check_output_with_place
(
place
,
atol
=
1e-5
)
self
.
check_output_with_place
(
place
,
atol
=
1e-5
,
check_dygraph
=
False
)
cls_name
=
"{0}_{1}"
.
format
(
parent
.
__name__
,
"CudnnChannelLast"
)
TestCudnnChannelLastCase
.
__name__
=
cls_name
...
...
@@ -161,7 +163,7 @@ class TestConv2DFusionOp(OpTest):
def
test_check_output
(
self
):
if
self
.
has_cuda
():
place
=
core
.
CUDAPlace
(
0
)
self
.
check_output_with_place
(
place
,
atol
=
1e-5
)
self
.
check_output_with_place
(
place
,
atol
=
1e-5
,
check_dygraph
=
False
)
def
init_test_case
(
self
):
self
.
pad
=
[
0
,
0
]
...
...
python/paddle/fluid/tests/unittests/test_conv2d_op.py (+29 −1)

@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 from paddle import fluid
 from paddle.fluid import Program, core, program_guard
-from paddle.fluid.tests.unittests.op_test import (
+from paddle.fluid.tests.unittests.eager_op_test import (
     OpTest,
     convert_float_to_uint16,
     get_numeric_gradient,

@@ -391,9 +391,36 @@ def create_test_cudnn_padding_VALID_class(parent):
     globals()[cls_name] = TestCUDNNPaddingVALIDCase


+def conv2d_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    padding_algorithm="EXPLICIT",
+    dilation=1,
+    groups=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.conv2d(
+        x,
+        weight,
+        stride,
+        padding,
+        padding_algorithm,
+        dilation,
+        groups,
+        data_format,
+    )
+
+
 class TestConv2DOp(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
+        self.python_api = conv2d_wrapper
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False

@@ -732,6 +759,7 @@ class TestConv2DOpError(unittest.TestCase):
 class TestConv2DOp_v2(OpTest):
     def setUp(self):
         self.op_type = "conv2d"
+        self.python_api = conv2d_wrapper
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False
...
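The new `conv2d_wrapper` exists because `OpTest` calls `self.python_api` with the attribute values recorded for the C++ operator, and those can still carry legacy sentinels (`data_format="AnyLayout"`, `padding_algorithm=None`) that the eager entry point `paddle._C_ops.conv2d` does not accept, so the wrapper normalizes them first (this is my reading of the diff, not stated in the commit). The normalization itself is plain Python and easy to check in isolation:

    # Standalone check of the sentinel normalization performed by
    # conv2d_wrapper above; `normalize` is an extracted illustration,
    # not a function from the commit.
    def normalize(data_format, padding_algorithm):
        if data_format == "AnyLayout":
            data_format = "NCDHW"
        if padding_algorithm is None:
            padding_algorithm = "EXPLICIT"
        return data_format, padding_algorithm

    assert normalize("AnyLayout", None) == ("NCDHW", "EXPLICIT")
    assert normalize("NHWC", "SAME") == ("NHWC", "SAME")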
python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py (+46 −0)

@@ -36,6 +36,32 @@ from paddle.fluid.tests.unittests.testsuite import create_op
 # ----------------TestDepthwiseConv -----


+def depthwise_conv2d_wrapper(
+    x,
+    weight,
+    stride=1,
+    padding=0,
+    padding_algorithm="EXPLICIT",
+    groups=1,
+    dilation=1,
+    data_format="NCDHW",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    if padding_algorithm is None:
+        padding_algorithm = "EXPLICIT"
+    return paddle._C_ops.depthwise_conv2d(
+        x,
+        weight,
+        stride,
+        padding,
+        padding_algorithm,
+        groups,
+        dilation,
+        data_format,
+    )
+
+
 class TestDepthwiseConv(TestConv2DOp):
     def init_test_case(self):
         self.use_cuda = True

@@ -47,6 +73,7 @@ class TestDepthwiseConv(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv2(TestConv2DOp):

@@ -60,6 +87,7 @@ class TestDepthwiseConv2(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv3(TestConv2DOp):

@@ -73,6 +101,7 @@ class TestDepthwiseConv3(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation(TestConv2DOp):

@@ -87,6 +116,7 @@ class TestDepthwiseConvWithDilation(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation2(TestConv2DOp):

@@ -101,6 +131,7 @@ class TestDepthwiseConvWithDilation2(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvandFuse(TestConv2DOp):

@@ -115,6 +146,7 @@ class TestDepthwiseConvandFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv2andFuse(TestConv2DOp):

@@ -129,6 +161,7 @@ class TestDepthwiseConv2andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv3andFuse(TestConv2DOp):

@@ -143,6 +176,7 @@ class TestDepthwiseConv3andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):

@@ -158,6 +192,7 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):

@@ -173,6 +208,7 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper


 class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):

@@ -185,6 +221,7 @@ class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 0, 1]

@@ -201,6 +238,7 @@ class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [0, 1, 0, 2]

@@ -217,6 +255,7 @@ class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 0, 0]

@@ -235,6 +274,7 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 2, 1]

@@ -253,6 +293,7 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [0, 1, 1, 0]

@@ -271,6 +312,7 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [2, 1, 2, 3]

@@ -289,6 +331,7 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [12, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 1, 1, 2]

@@ -307,6 +350,7 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 2, 0, 2]

@@ -326,6 +370,7 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [2, 1, 1, 0]

@@ -345,6 +390,7 @@ class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2):
         f_c = self.input_size[1] // self.groups
         self.filter_size = [24, f_c, 3, 3]
         self.op_type = "depthwise_conv2d"
+        self.python_api = depthwise_conv2d_wrapper

     def init_paddings(self):
         self.pad = [1, 3, 1, 3]
...
python/paddle/fluid/tests/unittests/test_cumsum_op.py (+9 −5)

@@ -17,7 +17,7 @@ import tempfile
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 import paddle.inference as paddle_infer

@@ -112,11 +112,15 @@ class TestCumsumOp(unittest.TestCase):
         self.assertTrue('out' in y.name)


+def cumsum_wrapper(x, axis=-1, flatten=False, exclusive=False, reverse=False):
+    return paddle._C_ops.cumsum(x, axis, flatten, exclusive, reverse)
+
+
 class TestSumOp1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()

@@ -215,7 +219,7 @@ class TestSumOpExclusive1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()

@@ -307,7 +311,7 @@ class TestSumOpExclusiveFP16(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.init_dtype()
         self.enable_cinn = False

@@ -341,7 +345,7 @@ class TestSumOpReverseExclusive(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
-        self.python_api = paddle.cumsum
+        self.python_api = cumsum_wrapper
         self.public_python_api = paddle.cumsum
         self.set_enable_cinn()
         self.init_dtype()
...
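`paddle.cumsum` itself can no longer serve as `python_api` here, presumably because the operator records `flatten`/`exclusive`/`reverse` attributes that the public signature does not take; the wrapper pins the C++ kernel's argument order instead, while `public_python_api` keeps pointing at `paddle.cumsum` for the prim checks. For the default flags the two agree, e.g.:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    # Private-API call in the wrapper's pinned order (axis, flatten,
    # exclusive, reverse); valid for the Paddle version this commit
    # targets, where paddle._C_ops.cumsum takes these positionally.
    y = paddle._C_ops.cumsum(x, -1, False, False, False)
    print(y.numpy())                  # [1. 3. 6.]
    print(paddle.cumsum(x).numpy())   # same result via the public API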
python/paddle/fluid/tests/unittests/test_elementwise_max_op.py (+14 −16)

@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle.fluid import core

@@ -44,22 +44,20 @@ class TestElementwiseOp(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             if self.attrs['axis'] == -1:
                 self.check_grad(
-                    ['X', 'Y'], 'Out', check_eager=False, check_prim=True
+                    ['X', 'Y'], 'Out', check_dygraph=False, check_prim=True
                 )
             else:
-                self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+                self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=True, check_prim=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_prim=True)

     def test_check_grad_ingore_x(self):
         if hasattr(self, 'attrs') and self.attrs['axis'] != -1:

@@ -68,6 +66,7 @@ class TestElementwiseOp(OpTest):
                 'Out',
                 max_relative_error=0.005,
                 no_grad_set=set("X"),
+                check_dygraph=False,
             )
         else:
             self.check_grad(

@@ -85,6 +84,7 @@ class TestElementwiseOp(OpTest):
                 'Out',
                 max_relative_error=0.005,
                 no_grad_set=set('Y'),
+                check_dygraph=False,
             )
         else:
             self.check_grad(

@@ -178,16 +178,16 @@ class TestElementwiseBF16Op(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             # check_prim=False, bfloat16 is not supported in `less_equal`
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
         self.check_grad(['Y'], 'Out', no_grad_set=set("X"))

@@ -204,12 +204,10 @@ class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_dygraph=False
             )
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', numeric_grad_delta=0.05)

     def test_check_grad_ingore_x(self):
         self.check_grad(
...
python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py (+18 −21)

@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle import fluid

@@ -41,19 +41,17 @@ class TestElementwisePowOp(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=False, check_prim=True
+                ['X', 'Y'], 'Out', check_prim=True, check_dygraph=False
             )
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', check_eager=True, check_prim=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_prim=True)


 class TestElementwisePowOp_ZeroDim1(TestElementwisePowOp):

@@ -190,9 +188,9 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):

@@ -213,9 +211,9 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):

@@ -236,9 +234,9 @@ class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(['X', 'Y'], 'Out')


 class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):

@@ -265,9 +263,9 @@ class TestElementwisePowOpInt(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()


 class TestElementwisePowGradOpInt(unittest.TestCase):

@@ -321,9 +319,9 @@ class TestElementwisePowOpFP16(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output(check_dygraph=False)
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad(self):
         self.check_grad(

@@ -332,7 +330,6 @@ class TestElementwisePowOpFP16(OpTest):
             user_defined_grads=pow_grad(
                 self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size
             ),
-            check_eager=True,
             check_prim=True,
         )

@@ -354,12 +351,12 @@ class TestElementwisePowBF16Op(OpTest):
     def test_check_output(self):
         if hasattr(self, 'attrs'):
-            self.check_output(check_eager=False)
+            self.check_output()
         else:
-            self.check_output(check_eager=True)
+            self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+        self.check_grad(['X', 'Y'], 'Out')


 if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/test_exponential_op.py (+3 −4)

@@ -15,12 +15,10 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle

 paddle.seed(100)


 class TestExponentialOp1(OpTest):
     def setUp(self):

@@ -50,7 +48,7 @@ class TestExponentialOp1(OpTest):
         hist2 = hist2.astype("float32")
         hist2 = hist2 / float(data_np.size)
-        np.testing.assert_allclose(hist1, hist2, rtol=0.02)
+        np.testing.assert_allclose(hist1, hist2, rtol=0.03)

     def test_check_grad_normal(self):
         self.check_grad(

@@ -61,6 +59,7 @@ class TestExponentialOp1(OpTest):
             user_defined_grad_outputs=[
                 np.random.rand(1024, 1024).astype(self.dtype)
             ],
+            check_dygraph=False,  # inplace can not call paddle.grad
         )
...
python/paddle/fluid/tests/unittests/test_fake_quantize_op.py (+12 −12)

@@ -17,7 +17,7 @@ import math
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def round_c_single_element(val):

@@ -65,7 +65,7 @@ class TestFakeQuantizeAbsMaxOp(OpTest):
         self.inputs = {'X': input_data}
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_quantize_abs_max(self):
         self._fake_quantize_abs_max(np.float32, (124, 240), np.random.random)

@@ -126,7 +126,7 @@ class TestFakeChannelWiseQuantizeAbsMaxOp(OpTest):
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
         self.attrs['quant_axis'] = quant_axis
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_channel_wise_quantize_abs_max(self):
         dtype_options = [np.float32, np.float16]

@@ -200,7 +200,7 @@ class TestFakeQuantizeRangeAbsMaxOp(OpTest):
         }
         self.dtype = dtype
         self.attrs['is_test'] = is_test
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_fake_quantize_range_abs_max(self):
         dtype_options = [np.float16, np.float32]

@@ -248,7 +248,7 @@ class TestMovingAverageAbsMaxScaleOp(OpTest):
             'OutScale': out_scale,
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_moving_average_abs_max(self):
         self._moving_average_abs_max_scale(

@@ -313,7 +313,7 @@ class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest):
             'OutScale': out_scale,
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)
         if with_gradient:
             gradient = [
                 np.ones(input_data.shape) / np.product(input_data.shape)

@@ -369,7 +369,7 @@ class TestFakeQuantizeDequantizeAbsMaxOp(OpTest):
             'OutScale': np.array(scale).astype(dtype),
         }
         self.dtype = dtype
-        self.check_output()
+        self.check_output(check_dygraph=False)
         gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)

@@ -426,7 +426,7 @@ class TestChannelWiseFakeQuantizeDequantizeAbsMaxOp(OpTest):
         self.outputs = {'Out': output_data, 'OutScale': scale}
         self.dtype = dtype
         self.attrs['quant_axis'] = quant_axis
-        self.check_output()
+        self.check_output(check_dygraph=False)
         gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)

@@ -504,7 +504,7 @@ class TestChannelWiseQuantizeOp(OpTest):
         self.outputs = {'Y': yq}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestChannelWiseQuantizeOp1(TestChannelWiseQuantizeOp):

@@ -540,7 +540,7 @@ class TestChannelWiseQuantizeOpTrain(OpTest):
         self.outputs = {'Y': yq, 'OutScale': scale}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestquantizeOp(OpTest):

@@ -566,7 +566,7 @@ class TestquantizeOp(OpTest):
         self.outputs = {'Y': yq}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestquantizeOpTrain(TestquantizeOp):

@@ -618,7 +618,7 @@ class TestquantizeOpTrain(TestquantizeOp):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == '__main__':
...
python/paddle/fluid/tests/unittests/test_gather_nd_op.py (+29 −43)

@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 from paddle import fluid

@@ -49,10 +49,10 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithEmptyIndexFP16(TestGatherNdOpWithEmptyIndex):

@@ -71,13 +71,11 @@ class TestGatherNdOpWithEmptyIndexBF16(TestGatherNdOpWithEmptyIndex):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithIndex1(OpTest):

@@ -106,10 +104,10 @@ class TestGatherNdOpWithIndex1(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithIndex1FP16(TestGatherNdOpWithIndex1):

@@ -128,13 +126,11 @@ class TestGatherNdOpWithIndex1BF16(TestGatherNdOpWithIndex1):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithLowIndex(OpTest):

@@ -168,10 +164,10 @@ class TestGatherNdOpWithLowIndex(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithLowIndexFP16(TestGatherNdOpWithLowIndex):

@@ -190,13 +186,11 @@ class TestGatherNdOpWithLowIndexBF16(TestGatherNdOpWithLowIndex):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpIndex1(OpTest):

@@ -228,10 +222,10 @@ class TestGatherNdOpIndex1(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpIndex1FP16(TestGatherNdOpIndex1):

@@ -250,13 +244,11 @@ class TestGatherNdOpIndex1BF16(TestGatherNdOpIndex1):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithSameIndexAsX(OpTest):

@@ -287,10 +279,10 @@ class TestGatherNdOpWithSameIndexAsX(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithSameIndexAsXFP16(TestGatherNdOpWithSameIndexAsX):

@@ -309,13 +301,11 @@ class TestGatherNdOpWithSameIndexAsXBF16(TestGatherNdOpWithSameIndexAsX):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithHighRankSame(OpTest):

@@ -347,10 +337,10 @@ class TestGatherNdOpWithHighRankSame(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithHighRankSameFP16(TestGatherNdOpWithHighRankSame):

@@ -369,13 +359,11 @@ class TestGatherNdOpWithHighRankSameBF16(TestGatherNdOpWithHighRankSame):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithHighRankDiff(OpTest):

@@ -408,10 +396,10 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestGatherNdOpWithHighRankDiffFP16(TestGatherNdOpWithHighRankDiff):

@@ -430,13 +418,11 @@ class TestGatherNdOpWithHighRankDiffBF16(TestGatherNdOpWithHighRankDiff):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', check_eager=False, check_prim=True
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


 # Test Python API
...
python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
...
@@ -19,7 +19,11 @@ import numpy as np
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import OpTest, convert_uint16_to_float
+from paddle.fluid.tests.unittests.eager_op_test import (
+    OpTest,
+    convert_uint16_to_float,
+    paddle_static_guard,
+)
 from paddle.tensor import random
...
@@ -209,87 +213,86 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp):
 # Test python API
 class TestGaussianRandomAPI(unittest.TestCase):
     def test_api(self):
-        positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)
-        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
-        shape_tensor_int32 = paddle.static.data(
-            name="shape_tensor_int32", shape=[2], dtype="int32"
-        )
-        shape_tensor_int64 = paddle.static.data(
-            name="shape_tensor_int64", shape=[2], dtype="int64"
-        )
-        out_1 = random.gaussian(
-            shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10
-        )
-        out_2 = random.gaussian(
-            shape=[2000, positive_2_int32], dtype="float32", mean=0.0, std=1.0, seed=10
-        )
-        out_3 = random.gaussian(
-            shape=[2000, positive_2_int64], dtype="float32", mean=0.0, std=1.0, seed=10
-        )
-        out_4 = random.gaussian(
-            shape=shape_tensor_int32, dtype="float32", mean=0.0, std=1.0, seed=10
-        )
-        out_5 = random.gaussian(
-            shape=shape_tensor_int64, dtype="float32", mean=0.0, std=1.0, seed=10
-        )
-        out_6 = random.gaussian(
-            shape=shape_tensor_int64, dtype=np.float32, mean=0.0, std=1.0, seed=10
-        )
-
-        exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
-            fluid.default_main_program(),
-            feed={
-                "shape_tensor_int32": np.array([2000, 500]).astype("int32"),
-                "shape_tensor_int64": np.array([2000, 500]).astype("int64"),
-            },
-            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6],
-        )
-
-        self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1)
-        self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1)
-        self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1)
-        self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1)
-        self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1)
-        self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1)
-        self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1)
+        with paddle_static_guard():
+            positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)
+            positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
+            shape_tensor_int32 = paddle.static.data(
+                name="shape_tensor_int32", shape=[2], dtype="int32"
+            )
+            shape_tensor_int64 = paddle.static.data(
+                name="shape_tensor_int64", shape=[2], dtype="int64"
+            )
+            out_1 = random.gaussian(
+                shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10
+            )
+            out_2 = random.gaussian(
+                shape=[2000, positive_2_int32],
+                dtype="float32",
+                mean=0.0,
+                std=1.0,
+                seed=10,
+            )
+            out_3 = random.gaussian(
+                shape=[2000, positive_2_int64],
+                dtype="float32",
+                mean=0.0,
+                std=1.0,
+                seed=10,
+            )
+            out_4 = random.gaussian(
+                shape=shape_tensor_int32,
+                dtype="float32",
+                mean=0.0,
+                std=1.0,
+                seed=10,
+            )
+            out_5 = random.gaussian(
+                shape=shape_tensor_int64,
+                dtype="float32",
+                mean=0.0,
+                std=1.0,
+                seed=10,
+            )
+            out_6 = random.gaussian(
+                shape=shape_tensor_int64,
+                dtype=np.float32,
+                mean=0.0,
+                std=1.0,
+                seed=10,
+            )
+
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
+                fluid.default_main_program(),
+                feed={
+                    "shape_tensor_int32": np.array([2000, 500]).astype("int32"),
+                    "shape_tensor_int64": np.array([2000, 500]).astype("int64"),
+                },
+                fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6],
+            )
+
+            self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1)
+            self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1)
+            self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1)
+            self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1)
+            self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1)
+            self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1)
+            self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1)

     def test_default_dtype(self):
         paddle.disable_static()

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             out = paddle.tensor.random.gaussian([2, 3])
...
@@ -311,13 +314,9 @@ class TestGaussianRandomAPI(unittest.TestCase):
         test_default_fp64()
         test_default_fp32()
-        paddle.enable_static()


 class TestStandardNormalDtype(unittest.TestCase):
     def test_default_dtype(self):
         paddle.disable_static()

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             out = paddle.tensor.random.standard_normal([2, 3])
...
@@ -339,8 +338,6 @@ class TestStandardNormalDtype(unittest.TestCase):
         test_default_fp64()
         test_default_fp32()
-        paddle.enable_static()


 class TestRandomValue(unittest.TestCase):
     def test_fixed_random_number(self):
...
@@ -402,7 +399,6 @@ class TestRandomValue(unittest.TestCase):
         _check_random_value(
             core.VarDesc.VarType.FP32, expect, expect_mean, expect_std
         )
-        paddle.enable_static()

 if __name__ == "__main__":
...
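Note: the recurring edit in this file replaces reliance on a module-global paddle.enable_static() (and the matching cleanup calls now deleted) with the paddle_static_guard context manager, which scopes static mode to one block. A rough, hedged equivalent of what such a guard does — the real implementation lives in eager_op_test and may differ:

import contextlib
import paddle

@contextlib.contextmanager
def static_guard_sketch():  # illustrative stand-in for paddle_static_guard
    paddle.enable_static()  # enter static-graph mode for the block
    try:
        yield
    finally:
        paddle.disable_static()  # always restore dygraph, even on failure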
python/paddle/fluid/tests/unittests/test_group_norm_op.py
...
@@ -15,7 +15,12 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    paddle_static_guard,
+    skip_check_grad_ci,
+)
 from testsuite import create_op

 import paddle
...
@@ -42,28 +47,41 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout):
 class TestGroupNormOpError(unittest.TestCase):
     def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-
-            def test_x_type():
-                input = np.random.random(2, 100, 3, 5).astype('float32')
-                groups = 2
-                paddle.static.nn.group_norm(input, groups)
-
-            self.assertRaises(TypeError, test_x_type)
-
-            def test_x_dtype():
-                x2 = paddle.static.data(
-                    name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32'
-                )
-                groups = 2
-                paddle.static.nn.group_norm(x2, groups)
-
-            self.assertRaises(TypeError, test_x_dtype)
+        with paddle_static_guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+
+                def test_x_type():
+                    input = np.random.random(2, 100, 3, 5).astype('float32')
+                    groups = 2
+                    paddle.static.nn.group_norm(input, groups)
+
+                self.assertRaises(TypeError, test_x_type)
+
+                def test_x_dtype():
+                    x2 = paddle.static.data(
+                        name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32'
+                    )
+                    groups = 2
+                    paddle.static.nn.group_norm(x2, groups)
+
+                self.assertRaises(TypeError, test_x_dtype)
+
+
+def group_norm_wrapper(
+    input, weight, bias, epsilon=1e-5, num_groups=0, data_format="NCHW"
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    return paddle._C_ops.group_norm(
+        input, weight, bias, epsilon, num_groups, data_format
+    )


 class TestGroupNormOp(OpTest):
     def setUp(self):
         self.op_type = "group_norm"
+        self.python_api = group_norm_wrapper
+        self.python_out_sig = ["Y"]
         self.data_format = "NCHW"
         self.dtype = np.float64
         self.shape = (2, 100, 3, 5)
...
@@ -201,6 +219,8 @@ class TestGroupNormFP16OP(TestGroupNormOp):
 class TestGroupNormBF16Op(OpTest):
     def setUp(self):
         self.op_type = "group_norm"
+        self.python_api = group_norm_wrapper
+        self.python_out_sig = ["Y"]
         self.data_format = "NCHW"
         self.dtype = np.uint16
         self.shape = (2, 100, 3, 5)
...
@@ -361,58 +381,68 @@ class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp):
 class TestGroupNormAPI_With_NHWC(unittest.TestCase):
     paddle.enable_static()

     def test_case1(self):
-        data1 = paddle.static.data(
-            name='data1', shape=[None, 3, 3, 4], dtype='float64'
-        )
-        out1 = paddle.static.nn.group_norm(
-            input=data1, groups=2, data_layout="NHWC"
-        )
-        data2 = paddle.static.data(
-            name='data2', shape=[None, 4, 3, 3], dtype='float64'
-        )
-        out2 = paddle.static.nn.group_norm(
-            input=data2, groups=2, data_layout="NCHW"
-        )
-
-        data1_np = np.random.random((2, 3, 3, 4)).astype("float64")
-        data2_np = np.random.random((2, 4, 3, 3)).astype("float64")
-        scale = np.array([1]).astype("float64")
-        bias = np.array([0]).astype("float64")
-
-        place = core.CPUPlace()
-        exe = fluid.Executor(place)
-        results = exe.run(
-            fluid.default_main_program(),
-            feed={"data1": data1_np, "data2": data2_np},
-            fetch_list=[out1, out2],
-            return_numpy=True,
-        )
-        expect_res1 = group_norm_naive(
-            data1_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NHWC"
-        )
-        expect_res2 = group_norm_naive(
-            data2_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NCHW"
-        )
-        np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05)
-        np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05)
+        with paddle_static_guard():
+            data1 = paddle.static.data(
+                name='data1', shape=[None, 3, 3, 4], dtype='float64'
+            )
+            out1 = paddle.static.nn.group_norm(
+                input=data1, groups=2, data_layout="NHWC"
+            )
+            data2 = paddle.static.data(
+                name='data2', shape=[None, 4, 3, 3], dtype='float64'
+            )
+            out2 = paddle.static.nn.group_norm(
+                input=data2, groups=2, data_layout="NCHW"
+            )
+
+            data1_np = np.random.random((2, 3, 3, 4)).astype("float64")
+            data2_np = np.random.random((2, 4, 3, 3)).astype("float64")
+            scale = np.array([1]).astype("float64")
+            bias = np.array([0]).astype("float64")
+
+            place = core.CPUPlace()
+            exe = fluid.Executor(place)
+            results = exe.run(
+                fluid.default_main_program(),
+                feed={"data1": data1_np, "data2": data2_np},
+                fetch_list=[out1, out2],
+                return_numpy=True,
+            )
+            expect_res1 = group_norm_naive(
+                data1_np,
+                scale,
+                bias,
+                epsilon=1e-5,
+                groups=2,
+                data_layout="NHWC",
+            )
+            expect_res2 = group_norm_naive(
+                data2_np,
+                scale,
+                bias,
+                epsilon=1e-5,
+                groups=2,
+                data_layout="NCHW",
+            )
+            np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05)
+            np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05)


 class TestGroupNormException(unittest.TestCase):
     # data_layout is not NHWC or NCHW
     def test_exception(self):
-        data = paddle.static.data(
-            name='data', shape=[None, 3, 3, 4], dtype="float64"
-        )
-
-        def attr_data_format():
-            out = paddle.static.nn.group_norm(
-                input=data, groups=2, data_layout="NDHW"
-            )
-
-        self.assertRaises(ValueError, attr_data_format)
+        with paddle_static_guard():
+            data = paddle.static.data(
+                name='data', shape=[None, 3, 3, 4], dtype="float64"
+            )
+
+            def attr_data_format():
+                out = paddle.static.nn.group_norm(
+                    input=data, groups=2, data_layout="NDHW"
+                )
+
+            self.assertRaises(ValueError, attr_data_format)


 class TestGroupNormEager(unittest.TestCase):
...
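Note: group_norm_wrapper above is the representative shape of the new python_api hooks: a plain function mirroring the op signature that forwards to the generated paddle._C_ops binding, normalizing odd attribute values (the "AnyLayout" placeholder) on the way in. Hooking it up is two assignments in setUp; a sketch with a hypothetical class name, the wrapper being the one defined in the hunk above, and the remaining fixture code elided:

from eager_op_test import OpTest

class TestGroupNormSketch(OpTest):  # illustrative condensation of TestGroupNormOp
    def setUp(self):
        self.op_type = "group_norm"
        self.python_api = group_norm_wrapper  # forwards to paddle._C_ops.group_norm
        self.python_out_sig = ["Y"]  # the op has several outputs; compare 'Y'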
python/paddle/fluid/tests/unittests/test_gru_op.py
...
@@ -16,7 +16,7 @@ import functools
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.fluid.tests.unittests.test_lstm_op import ACTIVATION
...
python/paddle/fluid/tests/unittests/test_gru_rnn_op.py
...
@@ -17,7 +17,7 @@ import sys
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle.fluid import core
...
@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf)
 paddle.enable_static()


+def rnn_wrapper(
+    Input,
+    PreState,
+    WeightList=None,
+    SequenceLength=None,
+    dropout_prob=0.0,
+    is_bidirec=False,
+    input_size=10,
+    hidden_size=100,
+    num_layers=1,
+    mode="LSTM",
+    seed=0,
+    is_test=False,
+):
+    dropout_state_in = paddle.Tensor()
+    return paddle._C_ops.rnn(
+        Input,
+        [PreState],
+        WeightList,
+        SequenceLength,
+        dropout_state_in,
+        dropout_prob,
+        is_bidirec,
+        input_size,
+        hidden_size,
+        num_layers,
+        mode,
+        seed,
+        is_test,
+    )


 class TestGRUOp(OpTest):
     def get_weight_names(self):
         weight_names = []
...
@@ -44,6 +76,10 @@ class TestGRUOp(OpTest):
     def setUp(self):
         self.op_type = "rnn"
+        self.python_api = rnn_wrapper
+        self.python_out_sig = ["Out", "DropoutState", "State"]
+        self.python_out_sig_sub_name = {"State": ["last_hidden"]}
         self.dtype = "float32" if core.is_compiled_with_rocm() else "float64"
         self.sequence_length = (
             None
...
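Note: rnn is a structured-output op, so the migrated test also declares how the wrapper's return maps onto op outputs: python_out_sig lists the op output names and python_out_sig_sub_name names which element of the "State" list the harness should compare. The wiring, condensed as a hedged sketch (hypothetical class name; field values copied from the hunk above, fixture code elided):

from eager_op_test import OpTest

class TestGRUOpSketch(OpTest):  # illustrative condensation of TestGRUOp
    def setUp(self):
        self.op_type = "rnn"
        self.python_api = rnn_wrapper  # the wrapper added in this diff
        self.python_out_sig = ["Out", "DropoutState", "State"]
        self.python_out_sig_sub_name = {"State": ["last_hidden"]}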
python/paddle/fluid/tests/unittests/test_hash_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestHashOp(OpTest):
...
python/paddle/fluid/tests/unittests/test_index_select_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 from paddle import fluid
...
@@ -60,10 +60,10 @@ class TestIndexSelectOp(OpTest):
         self.index_size = 100

     def test_check_output(self):
-        self.check_output(check_eager=True, check_prim=True)
+        self.check_output(check_prim=True)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestIndexSelectOpCase2(TestIndexSelectOp):
...
@@ -132,10 +132,10 @@ class TestIndexSelectBF16Op(OpTest):
         self.index_size = 100

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestIndexSelectAPI(unittest.TestCase):
...
python/paddle/fluid/tests/unittests/test_label_smooth_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
...
@@ -44,10 +44,10 @@ class TestLabelSmoothOp(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=True)
+        self.check_grad(["X"], "Out")


 class TestLabelSmoothFP16OP(TestLabelSmoothOp):
...
python/paddle/fluid/tests/unittests/test_log_softmax.py
...
@@ -19,7 +19,10 @@ import numpy as np
 import paddle
 import paddle.nn.functional as F
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
+from paddle.fluid.tests.unittests.eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+)

 np.random.seed(10)
...
python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py
...
@@ -17,7 +17,7 @@ import unittest
 from typing import Optional

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle import fluid
...
@@ -214,10 +214,17 @@ class TestLogcumsumexp(unittest.TestCase):
             out = exe.run(feed={'X': data_np}, fetch_list=[y.name])


+def logcumsumexp_wrapper(
+    x, axis=-1, flatten=False, exclusive=False, reverse=False
+):
+    return paddle._C_ops.logcumsumexp(x, axis, flatten, exclusive, reverse)


 class BaseTestCases:
     class BaseOpTest(OpTest):
         def setUp(self):
             self.op_type = "logcumsumexp"
             self.python_api = logcumsumexp_wrapper
             input, attrs = self.input_and_attrs()
             self.inputs = {'X': input}
             self.attrs = attrs
...
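Note: logcumsumexp_wrapper is the simplest wrapper form — positional forwarding of the input plus the op attributes. Since paddle.logcumsumexp is also a public API, the op's semantics can be sanity-checked against numpy directly; an illustrative check (values invented, a dygraph session assumed, and assuming the public API matches the binding):

import numpy as np
import paddle

x_np = np.array([0.0, 1.0, 2.0], dtype="float64")
out = paddle.logcumsumexp(paddle.to_tensor(x_np), axis=-1)
ref = np.log(np.cumsum(np.exp(x_np)))  # log of the running sum of exps
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6)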
python/paddle/fluid/tests/unittests/test_logit_op.py
...
@@ -19,7 +19,7 @@ from eager_op_test import OpTest
 import paddle
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16
+from paddle.fluid.tests.unittests.eager_op_test import convert_float_to_uint16

 np.random.seed(10)
...
python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py
...
@@ -20,7 +20,7 @@ import paddle
 from paddle import enable_static, fluid
 from paddle.fluid import core
 from paddle.fluid.op import Operator
-from paddle.fluid.tests.unittests.op_test import (
+from paddle.fluid.tests.unittests.eager_op_test import (
     OpTest,
     convert_float_to_uint16,
     convert_uint16_to_float,
...
python/paddle/fluid/tests/unittests/test_lookup_table_op.py
...
@@ -15,7 +15,12 @@
 import unittest

 import numpy as np
-from op_test import OpTest, check_out_dtype, skip_check_grad_ci
+from eager_op_test import (
+    OpTest,
+    check_out_dtype,
+    paddle_static_guard,
+    skip_check_grad_ci,
+)

 import paddle
 import paddle.nn.functional as F
...
@@ -157,38 +162,43 @@ class TestLookupTableWithTensorIdsWIsSelectedRows(
 class TestEmbedOpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-            input_data = np.random.randint(0, 10, (4, 1)).astype("int64")
-
-            def test_Variable():
-                # the input type must be Variable
-                fluid.layers.embedding(input=input_data, size=(10, 64))
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_input_dtype():
-                # the input dtype must be int64
-                input = paddle.static.data(name='x', shape=[4, 1], dtype='float32')
-                fluid.layers.embedding(input=input, size=(10, 64))
-
-            self.assertRaises(TypeError, test_input_dtype)
-
-            def test_param_dtype():
-                # dtype must be float32 or float64
-                input2 = paddle.static.data(name='x2', shape=[4, 1], dtype='int64')
-                fluid.layers.embedding(input=input2, size=(10, 64), dtype='int64')
-
-            self.assertRaises(TypeError, test_param_dtype)
-            input3 = paddle.static.data(name='x3', shape=[4, 1], dtype='int64')
-            fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                input_data = np.random.randint(0, 10, (4, 1)).astype("int64")
+
+                def test_Variable():
+                    # the input type must be Variable
+                    fluid.layers.embedding(input=input_data, size=(10, 64))
+
+                self.assertRaises(TypeError, test_Variable)
+
+                def test_input_dtype():
+                    # the input dtype must be int64
+                    input = paddle.static.data(
+                        name='x', shape=[4, 1], dtype='float32'
+                    )
+                    fluid.layers.embedding(input=input, size=(10, 64))
+
+                self.assertRaises(TypeError, test_input_dtype)
+
+                def test_param_dtype():
+                    # dtype must be float32 or float64
+                    input2 = paddle.static.data(
+                        name='x2', shape=[4, 1], dtype='int64'
+                    )
+                    fluid.layers.embedding(
+                        input=input2, size=(10, 64), dtype='int64'
+                    )
+
+                self.assertRaises(TypeError, test_param_dtype)
+                input3 = paddle.static.data(
+                    name='x3', shape=[4, 1], dtype='int64'
+                )
+                fluid.layers.embedding(
+                    input=input3, size=(10, 64), dtype='float16'
+                )


 class TestLookupTableOpInt8(OpTest):
     def setUp(self):
...
python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import convert_uint16_to_float
+from paddle.fluid.tests.unittests.eager_op_test import convert_uint16_to_float
 from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import (
     TestLookupTableBF16Op,
     TestLookupTableBF16OpIds4D,
...
@@ -32,6 +32,7 @@ from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import (
 class TestLookupTableV2BF16Op(TestLookupTableBF16Op):
     def init_test(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         self.ids_shape = 4
         self.mkldnn_data_type = "bfloat16"
...
@@ -39,6 +40,7 @@ class TestLookupTableV2BF16Op(TestLookupTableBF16Op):
 class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D):
     def init_test(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         self.ids_shape = (2, 4, 5)
         self.mkldnn_data_type = "bfloat16"
...
@@ -48,6 +50,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows(
 ):
     def init_test(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         self.ids_shape = 10
...
@@ -56,6 +59,7 @@ class TestLookupTableV2BF16OpWIsSelectedRows4DIds(
 ):
     def init_test(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         self.ids_shape = (3, 4, 5)
...
@@ -88,6 +92,7 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
     def setUp(self):
         self.op_type = "lookup_table_v2"
+        self.python_api = paddle.nn.functional.embedding
         self.ids_shape = [4]
         self.w_shape = [10, 64]
         self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype(
...
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
...
@@ -17,7 +17,7 @@ import random
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle.fluid import core
...
@@ -524,7 +524,7 @@ class TestCUDNNLstmOp(OpTest):
         else:
             paddle.enable_static()
             self.check_output_with_place(
-                place, no_check_set=['Reserve', 'StateOut']
+                place, no_check_set=['Reserve', 'StateOut'], check_dygraph=False
             )
             paddle.disable_static()
...
@@ -536,6 +536,7 @@ class TestCUDNNLstmOp(OpTest):
                 place,
                 {'Input', var_name, 'InitH', 'InitC'},
                 ['Out', 'LastH', 'LastC'],
+                check_dygraph=False,
             )
...
python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def generate_compatible_shapes_mul_head(dim_X, dim_Y, transpose_X, transpose_Y):
...
python/paddle/fluid/tests/unittests/test_matrix_rank_op.py
...
@@ -19,7 +19,7 @@ import numpy as np
 import paddle
 from paddle import fluid
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.eager_op_test import OpTest

 paddle.enable_static()
 SEED = 2049
...
python/paddle/fluid/tests/unittests/test_momentum_op.py
...
@@ -16,7 +16,7 @@ import unittest

 import numpy
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle import fluid
...
@@ -54,9 +54,38 @@ def calculate_momentum_by_numpy(
     return param_out, velocity_out


+def momentum_wrapper(
+    param,
+    grad,
+    velocity,
+    learning_rate=1.0,
+    master_param=None,
+    mu=0.0,
+    use_nesterov=False,
+    regularization_method="",
+    regularization_coeff=0.0,
+    multi_precision=False,
+    rescale_grad=1.0,
+):
+    return paddle._C_ops.momentum_(
+        param,
+        grad,
+        velocity,
+        learning_rate,
+        master_param,
+        mu,
+        use_nesterov,
+        regularization_method,
+        regularization_coeff,
+        multi_precision,
+        rescale_grad,
+    )


 class TestMomentumOp1(OpTest):
     def setUp(self):
         self.op_type = "momentum"
+        self.python_api = momentum_wrapper
         self.dtype = np.float32
         self.init_dtype()
...
@@ -107,6 +136,7 @@ class TestMomentumOp2(OpTest):
     def setUp(self):
         self.op_type = "momentum"
+        self.python_api = momentum_wrapper
         param = np.random.random((123, 321)).astype("float32")
         grad = np.random.random((123, 321)).astype("float32")
...
@@ -221,7 +251,7 @@ class TestLarsMomentumOpWithMP(OpTest):
         if core.is_compiled_with_cuda():
             place = fluid.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(place)
+                self.check_output_with_place(place, check_dygraph=False)

     def config(self):
         self.params_num = 1
...
@@ -561,6 +591,7 @@ class TestMomentumV2(unittest.TestCase):
 class TestMomentumOpWithDecay(OpTest):
     def setUp(self):
         self.op_type = "momentum"
+        self.python_api = momentum_wrapper
         self.dtype = np.float32
         self.use_nesterov = True
         self.regularization_method = 'l2_decay'
...
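Note: momentum_wrapper forwards to paddle._C_ops.momentum_, whose trailing underscore marks the in-place variant — it updates the param and velocity buffers rather than returning fresh tensors, which is why an explicit python_api is needed before the dygraph check can run at all. The numpy reference in this file implements the standard update rule; in the plain (non-Nesterov, no-regularization) case it is roughly the following, shown with invented values:

import numpy as np

# velocity_out = mu * velocity + grad
# param_out    = param - learning_rate * velocity_out
mu, lr = 0.9, 0.01
param, grad, velocity = np.zeros(3), np.ones(3), np.zeros(3)
velocity = mu * velocity + grad
param = param - lr * velocity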
python/paddle/fluid/tests/unittests/test_nce.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard

 import paddle
 from paddle import fluid
...
@@ -186,156 +186,173 @@ class TestNCECase1SelectedRows(unittest.TestCase):
         custom_dist,
         is_sparse,
     ):
-        input = paddle.static.data(name="input", shape=[-1, 10], dtype="float32")
-        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
-
-        w_param = (
-            fluid.default_main_program()
-            .global_block()
-            .create_parameter(
-                shape=[num_total_classes, 10],
-                dtype='float32',
-                name='nce_w',
-                initializer=paddle.nn.initializer.Constant(),
-            )
-        )
-        b_param = (
-            fluid.default_main_program()
-            .global_block()
-            .create_parameter(
-                shape=[num_total_classes, 1],
-                dtype='float32',
-                name='nce_b',
-                initializer=paddle.nn.initializer.Constant(),
-            )
-        )
-
-        cost = paddle.static.nn.nce(
-            input=input,
-            label=label,
-            num_total_classes=num_total_classes,
-            sampler=sampler,
-            custom_dist=custom_dist,
-            sample_weight=None,
-            param_attr='nce_w',
-            bias_attr='nce_b',
-            seed=1,
-            num_neg_samples=num_neg_samples,
-            is_sparse=is_sparse,
-        )
-        avg_cost = paddle.mean(cost)
-        # optimizer
-        optimizer = self.get_optimizer()
-        optimizer.minimize(avg_cost)
-
-        return [avg_cost, [input, label]]
+        with paddle_static_guard():
+            input = paddle.static.data(
+                name="input", shape=[-1, 10], dtype="float32"
+            )
+            label = paddle.static.data(
+                name="label", shape=[-1, 1], dtype="int64"
+            )
+
+            w_param = (
+                fluid.default_main_program()
+                .global_block()
+                .create_parameter(
+                    shape=[num_total_classes, 10],
+                    dtype='float32',
+                    name='nce_w',
+                    initializer=paddle.nn.initializer.Constant(),
+                )
+            )
+            b_param = (
+                fluid.default_main_program()
+                .global_block()
+                .create_parameter(
+                    shape=[num_total_classes, 1],
+                    dtype='float32',
+                    name='nce_b',
+                    initializer=paddle.nn.initializer.Constant(),
+                )
+            )
+
+            cost = paddle.static.nn.nce(
+                input=input,
+                label=label,
+                num_total_classes=num_total_classes,
+                sampler=sampler,
+                custom_dist=custom_dist,
+                sample_weight=None,
+                param_attr='nce_w',
+                bias_attr='nce_b',
+                seed=1,
+                num_neg_samples=num_neg_samples,
+                is_sparse=is_sparse,
+            )
+            avg_cost = paddle.mean(cost)
+            # optimizer
+            optimizer = self.get_optimizer()
+            optimizer.minimize(avg_cost)
+
+            return [avg_cost, [input, label]]

     def test_input_is_selected_rows(self):
-        place = self.get_place()
-        exe = fluid.Executor(place)
-
-        data = self.get_train_data(self.batch_size)
-        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
-
-        rets = []
-        # for dense
-        dense_scope = fluid.core.Scope()
-        dense_startup_program = fluid.framework.Program()
-        dense_train_program = fluid.framework.Program()
-        with fluid.scope_guard(dense_scope):
-            with fluid.program_guard(dense_train_program, dense_startup_program):
-                cost, feeds = self.train_network(
-                    20, 5, "custom_dist", nid_freq_arr.tolist(), False
-                )
-                feeder = fluid.DataFeeder(feed_list=feeds, place=place)
-                exe.run(dense_startup_program)
-                loss_val = exe.run(
-                    dense_train_program,
-                    feed=feeder.feed(data),
-                    fetch_list=[cost.name],
-                )
-                rets.append(np.mean(loss_val))
-
-        # for sparse
-        sparse_scope = fluid.core.Scope()
-        sparse_startup_program = fluid.framework.Program()
-        sparse_train_program = fluid.framework.Program()
-        with fluid.scope_guard(sparse_scope):
-            with fluid.program_guard(sparse_train_program, sparse_startup_program):
-                cost, feeds = self.train_network(
-                    20, 5, "custom_dist", nid_freq_arr.tolist(), True
-                )
-                feeder = fluid.DataFeeder(feed_list=feeds, place=place)
-                exe.run(sparse_startup_program)
-                loss_val = exe.run(
-                    sparse_train_program,
-                    feed=feeder.feed(data),
-                    fetch_list=[cost.name],
-                )
-                rets.append(np.mean(loss_val))
-
-        self.assertEqual(rets[0], rets[1])
+        with paddle_static_guard():
+            place = self.get_place()
+            exe = fluid.Executor(place)
+
+            data = self.get_train_data(self.batch_size)
+            nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype(
+                'float32'
+            )
+
+            rets = []
+            # for dense
+            dense_scope = fluid.core.Scope()
+            dense_startup_program = fluid.framework.Program()
+            dense_train_program = fluid.framework.Program()
+            with fluid.scope_guard(dense_scope):
+                with fluid.program_guard(
+                    dense_train_program, dense_startup_program
+                ):
+                    cost, feeds = self.train_network(
+                        20, 5, "custom_dist", nid_freq_arr.tolist(), False
+                    )
+                    feeder = fluid.DataFeeder(feed_list=feeds, place=place)
+                    paddle.enable_static()
+                    exe.run(dense_startup_program)
+                    loss_val = exe.run(
+                        dense_train_program,
+                        feed=feeder.feed(data),
+                        fetch_list=[cost.name],
+                    )
+                    rets.append(np.mean(loss_val))
+
+            # for sparse
+            sparse_scope = fluid.core.Scope()
+            sparse_startup_program = fluid.framework.Program()
+            sparse_train_program = fluid.framework.Program()
+            with fluid.scope_guard(sparse_scope):
+                with fluid.program_guard(
+                    sparse_train_program, sparse_startup_program
+                ):
+                    cost, feeds = self.train_network(
+                        20, 5, "custom_dist", nid_freq_arr.tolist(), True
+                    )
+                    feeder = fluid.DataFeeder(feed_list=feeds, place=place)
+                    paddle.enable_static()
+                    exe.run(sparse_startup_program)
+                    loss_val = exe.run(
+                        sparse_train_program,
+                        feed=feeder.feed(data),
+                        fetch_list=[cost.name],
+                    )
+                    rets.append(np.mean(loss_val))
+
+            self.assertEqual(rets[0], rets[1])


 class TestNCE_OpError(unittest.TestCase):
     def test_errors(self):
-        with program_guard(Program(), Program()):
-            input1 = fluid.create_lod_tensor(
-                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()
-            )
-            label1 = paddle.static.data(
-                name='label1', shape=[-1, 4], dtype="int64"
-            )
-            # the input(input) of nce layer must be Variable.
-            self.assertRaises(TypeError, paddle.static.nn.nce, input1, label1, 5)
-
-            input2 = paddle.static.data(
-                name='input2', shape=[-1, 4], dtype="float32"
-            )
-            label2 = fluid.create_lod_tensor(
-                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()
-            )
-            # the input(label) of nce layer must be Variable.
-            self.assertRaises(TypeError, paddle.static.nn.nce, input2, label2, 5)
-
-            input3 = paddle.static.data(
-                name='input3', shape=[-1, 4], dtype="float16"
-            )
-            label3 = paddle.static.data(
-                name='label3', shape=[-1, 1], dtype="int64"
-            )
-            # the data type of input(input) must be float32 or float64.
-            self.assertRaises(TypeError, paddle.static.nn.nce, input3, label3, 5)
-
-            input4 = paddle.static.data(
-                name='input4', shape=[-1, 4], dtype="float32"
-            )
-            label4 = paddle.static.data(
-                name='label4', shape=[-1, 1], dtype="int32"
-            )
-            # the data type of input(label) must be int64.
-            self.assertRaises(TypeError, paddle.static.nn.nce, input4, label4, 5)
-
-            input5 = paddle.static.data(name='x', shape=[1], dtype='float32')
-            label5 = paddle.static.data(name='label', shape=[1], dtype='int64')
-            self.assertRaises(ValueError, paddle.static.nn.nce, input5, label5, 1)
+        with paddle_static_guard():
+            with program_guard(Program(), Program()):
+                input1 = fluid.create_lod_tensor(
+                    np.array([0.0, 3.0, 2.0, 4.0]),
+                    [[1, 1, 2]],
+                    fluid.CPUPlace(),
+                )
+                label1 = paddle.static.data(
+                    name='label1', shape=[-1, 4], dtype="int64"
+                )
+                # the input(input) of nce layer must be Variable.
+                self.assertRaises(
+                    TypeError, paddle.static.nn.nce, input1, label1, 5
+                )
+
+                input2 = paddle.static.data(
+                    name='input2', shape=[-1, 4], dtype="float32"
+                )
+                label2 = fluid.create_lod_tensor(
+                    np.array([0.0, 3.0, 2.0, 4.0]),
+                    [[1, 1, 2]],
+                    fluid.CPUPlace(),
+                )
+                # the input(label) of nce layer must be Variable.
+                self.assertRaises(
+                    TypeError, paddle.static.nn.nce, input2, label2, 5
+                )
+
+                input3 = paddle.static.data(
+                    name='input3', shape=[-1, 4], dtype="float16"
+                )
+                label3 = paddle.static.data(
+                    name='label3', shape=[-1, 1], dtype="int64"
+                )
+                # the data type of input(input) must be float32 or float64.
+                self.assertRaises(
+                    TypeError, paddle.static.nn.nce, input3, label3, 5
+                )
+
+                input4 = paddle.static.data(
+                    name='input4', shape=[-1, 4], dtype="float32"
+                )
+                label4 = paddle.static.data(
+                    name='label4', shape=[-1, 1], dtype="int32"
+                )
+                # the data type of input(label) must be int64.
+                self.assertRaises(
+                    TypeError, paddle.static.nn.nce, input4, label4, 5
+                )
+
+                input5 = paddle.static.data(name='x', shape=[1], dtype='float32')
+                label5 = paddle.static.data(
+                    name='label', shape=[1], dtype='int64'
+                )
+                self.assertRaises(
+                    ValueError, paddle.static.nn.nce, input5, label5, 1
+                )


 if __name__ == '__main__':
...
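Note: the nce changes are the largest instance of the static-guard wrap — each test body that builds Programs and runs an Executor moves one indent level into paddle_static_guard(), and exe.run call sites that helper code may reach after a dygraph switch are preceded by paddle.enable_static(). A hedged skeleton of the resulting structure (condensed and reordered for brevity; Ellipsis stands for the elided network-building code):

import paddle
from paddle import fluid
from eager_op_test import paddle_static_guard

def run_static_case():  # illustrative condensation of test_input_is_selected_rows
    with paddle_static_guard():
        startup = fluid.framework.Program()
        main = fluid.framework.Program()
        with fluid.program_guard(main, startup):
            ...  # build the network here
            paddle.enable_static()  # re-assert static mode before running
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup)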
python/paddle/fluid/tests/unittests/test_nn_grad.py
...
@@ -201,7 +201,7 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
 class TestSqueezeDoubleGradCheck(unittest.TestCase):
-    def squeeze_warpper(self, x):
+    def squeeze_wrapper(self, x):
         axes = [0, 2]
         return paddle.squeeze(x[0], axes)
...
@@ -221,7 +221,7 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase):
             [x], out, x_init=x_arr, place=place, eps=eps
         )
         gradient_checker.double_grad_check_for_dygraph(
-            self.squeeze_warpper, [x], out, x_init=x_arr, place=place
+            self.squeeze_wrapper, [x], out, x_init=x_arr, place=place
         )

     def test_grad(self):
...
python/paddle/fluid/tests/unittests/test_norm_op.py
...
@@ -15,8 +15,7 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest, skip_check_grad_ci
-from op_test import convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

 import paddle
 from paddle import fluid
...
python/paddle/fluid/tests/unittests/test_number_count_op.py
...
@@ -14,8 +14,8 @@

 import unittest

+import eager_op_test
 import numpy as np
-import op_test

 import paddle
 from paddle.distributed.models.moe import utils
...
@@ -33,7 +33,7 @@ def count(x, upper_num):
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
-class TestNumberCountOpInt64(op_test.OpTest):
+class TestNumberCountOpInt64(eager_op_test.OpTest):
     def setUp(self):
         upper_num = 16
         self.op_type = "number_count"
...
python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.nn.functional as F
...
@@ -68,6 +68,12 @@ def pixel_unshuffle_np(x, down_factor, data_format="NCHW"):
     return npresult


+def pixel_unshuffle_wrapper(x, downscale_factor, data_format):
+    return paddle._legacy_C_ops.pixel_unshuffle(
+        x, "downscale_factor", downscale_factor, "data_format", data_format
+    )


 class TestPixelUnshuffleOp(OpTest):
     '''TestPixelUnshuffleOp'''
...
@@ -75,6 +81,7 @@ class TestPixelUnshuffleOp(OpTest):
         '''setUp'''

         self.op_type = "pixel_unshuffle"
+        self.python_api = pixel_unshuffle_wrapper
         self.init_data_format()
         n, c, h, w = 2, 1, 12, 12
...
python/paddle/fluid/tests/unittests/test_pool2d_op.py
...
@@ -16,8 +16,9 @@ import unittest
 import numpy as np

 import paddle
 from paddle.fluid import core
-from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.eager_op_test import OpTest


 def adaptive_start_index(index, input_size, output_size):
...
@@ -285,6 +286,67 @@ def pool2D_forward_naive(
     return out


+def pool2d_wrapper_not_use_cudnn(
+    X,
+    ksize=[],
+    strides=[],
+    paddings=[],
+    ceil_mode=False,
+    exclusive=True,
+    data_format="NCDHW",
+    pooling_type="max",
+    global_pooling=False,
+    adaptive=False,
+    padding_algorithm="EXPLICIT",
+):
+    tmp = X._use_gpudnn(False)
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    return paddle._C_ops.pool2d(
+        tmp,
+        ksize,
+        strides,
+        paddings,
+        ceil_mode,
+        exclusive,
+        data_format,
+        pooling_type,
+        global_pooling,
+        adaptive,
+        padding_algorithm,
+    )
+
+
+def pool2d_wrapper_use_cudnn(
+    X,
+    ksize=[],
+    strides=[],
+    paddings=[],
+    ceil_mode=False,
+    exclusive=True,
+    data_format="NCDHW",
+    pooling_type="max",
+    global_pooling=False,
+    adaptive=False,
+    padding_algorithm="EXPLICIT",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    return paddle._C_ops.pool2d(
+        X,
+        ksize,
+        strides,
+        paddings,
+        ceil_mode,
+        exclusive,
+        data_format,
+        pooling_type,
+        global_pooling,
+        adaptive,
+        padding_algorithm,
+    )


 class TestPool2D_Op_Mixin:
     def setUp(self):
         self.op_type = "pool2d"
...
@@ -337,6 +399,11 @@ class TestPool2D_Op_Mixin:
         self.outputs = {'Out': output}

+        if self.use_cudnn:
+            self.python_api = pool2d_wrapper_use_cudnn
+        else:
+            self.python_api = pool2d_wrapper_not_use_cudnn

     def has_cudnn(self):
         return core.is_compiled_with_cuda() and self.use_cudnn
...
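Note: pool2d needs two wrappers because use_cudnn is not an argument of the eager binding. The non-cuDNN path first calls X._use_gpudnn(False) on the input tensor to steer kernel dispatch away from cuDNN, then both paths forward to the same paddle._C_ops.pool2d call. setUp picks the wrapper from the flag the static tests already carry, so no individual test case has to change; the selection, as it appears in the hunk above:

# inside TestPool2D_Op_Mixin.setUp, after self.outputs is built:
if self.use_cudnn:
    self.python_api = pool2d_wrapper_use_cudnn
else:
    self.python_api = pool2d_wrapper_not_use_cudnn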
python/paddle/fluid/tests/unittests/test_pool3d_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 from paddle.fluid import core
...
@@ -271,6 +271,67 @@ def avg_pool3D_forward_naive(
     return out


+def pool3d_wrapper_not_use_cudnn(
+    X,
+    ksize=[],
+    strides=[],
+    paddings=[],
+    ceil_mode=False,
+    exclusive=True,
+    data_format="NCDHW",
+    pooling_type="max",
+    global_pooling=False,
+    adaptive=False,
+    padding_algorithm="EXPLICIT",
+):
+    tmp = X._use_gpudnn(False)
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    return paddle._C_ops.pool3d(
+        tmp,
+        ksize,
+        strides,
+        paddings,
+        ceil_mode,
+        exclusive,
+        data_format,
+        pooling_type,
+        global_pooling,
+        adaptive,
+        padding_algorithm,
+    )
+
+
+def pool3d_wrapper_use_cudnn(
+    X,
+    ksize=[],
+    strides=[],
+    paddings=[],
+    ceil_mode=False,
+    exclusive=True,
+    data_format="NCDHW",
+    pooling_type="max",
+    global_pooling=False,
+    adaptive=False,
+    padding_algorithm="EXPLICIT",
+):
+    if data_format == "AnyLayout":
+        data_format = "NCDHW"
+    return paddle._C_ops.pool3d(
+        X,
+        ksize,
+        strides,
+        paddings,
+        ceil_mode,
+        exclusive,
+        data_format,
+        pooling_type,
+        global_pooling,
+        adaptive,
+        padding_algorithm,
+    )


 class TestPool3D_Op(OpTest):
     def setUp(self):
         self.op_type = "pool3d"
...
@@ -322,6 +383,11 @@ class TestPool3D_Op(OpTest):
         self.outputs = {'Out': output}

+        if self.use_cudnn:
+            self.python_api = pool3d_wrapper_use_cudnn
+        else:
+            self.python_api = pool3d_wrapper_not_use_cudnn

     def has_cudnn(self):
         return core.is_compiled_with_cuda() and self.use_cudnn
...
python/paddle/fluid/tests/unittests/test_pool_max_op.py
...
@@ -15,7 +15,9 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
+
+import paddle


 def adaptive_start_index(index, input_size, output_size):
...
@@ -129,6 +131,19 @@ def max_pool2D_forward_naive(
     return out, mask


+def max_pool3d_with_index_wapper(
+    x,
+    kernel_size=[],
+    strides=[],
+    paddings=[],
+    global_pooling=False,
+    adaptive=False,
+):
+    return paddle._C_ops.max_pool3d_with_index(
+        x, kernel_size, strides, paddings, global_pooling, adaptive
+    )


 class TestMaxPoolWithIndex_Op(OpTest):
     def setUp(self):
         self.init_test_case()
...
@@ -167,6 +182,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
     def init_test_case(self):
         self.op_type = "max_pool3d_with_index"
+        self.python_api = max_pool3d_with_index_wapper
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 7, 7, 7]
         self.ksize = [3, 3, 3]
...
@@ -188,6 +204,7 @@ class TestCase1(TestMaxPoolWithIndex_Op):
 class TestCase2(TestMaxPoolWithIndex_Op):
     def init_test_case(self):
         self.op_type = "max_pool3d_with_index"
+        self.python_api = max_pool3d_with_index_wapper
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 7, 7, 7]
         self.ksize = [3, 3, 3]
...
@@ -204,9 +221,25 @@ class TestCase3(TestCase2):
 # ----------------max_pool2d_with_index----------------
+def max_pool2d_with_index_wapper(
+    x,
+    kernel_size=[],
+    strides=[],
+    paddings=[],
+    global_pooling=False,
+    adaptive=False,
+):
+    return paddle._C_ops.max_pool2d_with_index(
+        x, kernel_size, strides, paddings, global_pooling, adaptive
+    )


 class TestCase4(TestMaxPoolWithIndex_Op):
     def init_test_case(self):
         self.op_type = "max_pool2d_with_index"
+        self.python_api = max_pool2d_with_index_wapper
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 7, 7]
         self.ksize = [3, 3]
...
@@ -225,6 +258,7 @@ class TestCase5(TestCase4):
 class TestCase6(TestMaxPoolWithIndex_Op):
     def init_test_case(self):
         self.op_type = "max_pool2d_with_index"
+        self.python_api = max_pool2d_with_index_wapper
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 7, 7]
         self.ksize = [3, 3]
...
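Note: the max_pool*d_with_index wrappers (the "wapper" spelling is the source's own identifier, kept as-is) forward kernel_size/strides/paddings straight into paddle._C_ops. The public functional API exposes the same value-plus-indices computation via return_mask=True, which is a convenient way to see what these tests compare; an illustrative check with invented shapes, dygraph assumed:

import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.random((2, 3, 7, 7)).astype("float32"))
out, mask = F.max_pool2d(x, kernel_size=3, stride=2, padding=0, return_mask=True)
print(out.shape, mask.shape)  # pooled maxima and their argmax indices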
python/paddle/fluid/tests/unittests/test_precision_recall_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def calc_precision(tp_count, fp_count):
...
python/paddle/fluid/tests/unittests/test_prelu_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci

 import paddle
 import paddle.nn.functional as F
...
@@ -162,16 +162,14 @@ class TestNNPReluAPI(unittest.TestCase):
         paddle.enable_static()


-def prelu_api_wrapper(x, weight, data_format="NCHW"):
-    weight = weight.reshape([-1])
-    return paddle.nn.functional.prelu(x, weight, data_format, name=None)
+def prelu_api_wrapper(x, alpha, data_format="NCHW", mode="all"):
+    return paddle._C_ops.prelu(x, alpha, data_format, mode)


 class PReluTest(OpTest):
     def setUp(self):
         self.init_dtype()
         self.init_input_shape()
-        self.eager_mode = True
         self.init_attr()
         self.op_type = "prelu"
         self.python_api = prelu_api_wrapper
...
@@ -192,8 +190,6 @@ class PReluTest(OpTest):
             alpha_np = np.random.uniform(-1, -0.5, [1, 1, 1, self.x_shape[-1]])
         else:
             alpha_np = np.random.uniform(-1, -0.5, [1] + self.x_shape[1:])
-            # eager check don't support mode = 'all'
-            self.eager_mode = False
         alpha_np = alpha_np.astype(self.dtype)

         self.inputs = {'X': x_np, 'Alpha': alpha_np}
...
@@ -226,10 +222,10 @@ class PReluTest(OpTest):
         self.attrs = {'mode': "channel", "data_format": "NCHW"}

     def test_check_output(self):
-        self.check_output(check_eager=self.eager_mode)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X', 'Alpha'], 'Out', check_eager=self.eager_mode)
+        self.check_grad(['X', 'Alpha'], 'Out')


 @skip_check_grad_ci(
...
@@ -392,9 +388,7 @@ def create_test_fp16_class(
             if core.is_compiled_with_cuda():
                 place = core.CUDAPlace(0)
                 if core.is_float16_supported(place):
-                    self.check_output_with_place(
-                        place, atol=atol, check_eager=self.eager_mode
-                    )
+                    self.check_output_with_place(place, atol=atol)

         def test_check_grad(self):
             place = core.CUDAPlace(0)
...
@@ -404,7 +398,6 @@ def create_test_fp16_class(
                     ['X', 'Alpha'],
                     'Out',
                     max_relative_error=max_relative_error,
-                    check_eager=self.eager_mode,
                 )

     cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
...
python/paddle/fluid/tests/unittests/test_rank_attention_op.py
...
@@ -16,7 +16,7 @@ import random
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from paddle.fluid import core
...
python/paddle/fluid/tests/unittests/test_rank_loss_op.py
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestRankLossOp(OpTest):
...
python/paddle/fluid/tests/unittests/test_reduce_op.py
浏览文件 @
6d0fa6f2
...
...
@@ -15,7 +15,7 @@
import
unittest
import
numpy
as
np
from
op_test
import
OpTest
,
convert_float_to_uint16
,
skip_check_grad_ci
from
eager_
op_test
import
OpTest
,
convert_float_to_uint16
,
skip_check_grad_ci
import
paddle
from
paddle
import
fluid
...
...
@@ -35,10 +35,10 @@ class TestSumOp(OpTest):
self
.
enable_cinn
=
True
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_
eager
=
True
,
check_
prim
=
True
)
self
.
check_grad
([
'X'
],
'Out'
,
check_prim
=
True
)
class
TestSumOpFp32
(
OpTest
):
...
...
@@ -58,7 +58,7 @@ class TestSumOpFp32(OpTest):
self
.
enable_cinn
=
True
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
calc_gradient
(
self
):
x
=
self
.
inputs
[
"X"
]
...
...
@@ -70,7 +70,6 @@ class TestSumOpFp32(OpTest):
[
'X'
],
'Out'
,
user_defined_grads
=
self
.
gradient
,
check_eager
=
True
,
check_prim
=
True
,
)
...
...
@@ -89,10 +88,10 @@ class TestSumOp_ZeroDim(OpTest):
self
.
enable_cinn
=
False
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
@
unittest
.
skipIf
(
...
...
@@ -118,7 +117,7 @@ class TestSumOp_bf16(OpTest):
def
test_check_output
(
self
):
place
=
core
.
CUDAPlace
(
0
)
self
.
check_output_with_place
(
place
,
check_eager
=
True
,
atol
=
0.1
)
self
.
check_output_with_place
(
place
,
atol
=
0.1
)
def
test_check_grad
(
self
):
place
=
core
.
CUDAPlace
(
0
)
...
...
@@ -127,7 +126,6 @@ class TestSumOp_bf16(OpTest):
[
'X'
],
'Out'
,
user_defined_grads
=
self
.
gradient
,
check_eager
=
True
,
check_prim
=
True
,
)
...
...
@@ -156,7 +154,7 @@ class TestSumOp_fp16_withInt(OpTest):
self
.
enable_cinn
=
True
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
calc_gradient
(
self
):
x
=
self
.
inputs
[
"X"
]
...
...
@@ -168,7 +166,6 @@ class TestSumOp_fp16_withInt(OpTest):
[
'X'
],
'Out'
,
user_defined_grads
=
self
.
gradient
,
check_eager
=
True
,
check_prim
=
True
,
)
...
...
@@ -188,10 +185,10 @@ class TestSumOp5D(OpTest):
self
.
enable_cinn
=
True
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_
eager
=
True
,
check_
prim
=
True
)
self
.
check_grad
([
'X'
],
'Out'
,
check_prim
=
True
)
class
TestSumOp6D
(
OpTest
):
...
...
@@ -207,10 +204,10 @@ class TestSumOp6D(OpTest):
self
.
outputs
=
{
'Out'
:
self
.
inputs
[
'X'
].
sum
(
axis
=
0
)}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_
eager
=
True
,
check_
prim
=
True
)
self
.
check_grad
([
'X'
],
'Out'
,
check_prim
=
True
)
class
TestSumOp8D
(
OpTest
):
...
...
@@ -227,7 +224,7 @@ class TestSumOp8D(OpTest):
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
@
skip_check_grad_ci
(
...
...
@@ -249,14 +246,13 @@ class TestMaxOp(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
# only composite op support gradient check of reduce_max
self
.
check_grad
(
[
'X'
],
'Out'
,
check_eager
=
True
,
check_prim
=
True
,
only_check_prim
=
True
,
)
...
...
@@ -266,7 +262,7 @@ class TestMaxOp(OpTest):
self
.
inputs
=
{
'X'
:
np
.
random
.
random
((
5
,
6
,
10
)).
astype
(
"float16"
)}
place
=
core
.
CUDAPlace
(
0
)
with
self
.
assertRaises
(
RuntimeError
)
as
cm
:
self
.
check_output_with_place
(
place
,
check_eager
=
True
)
self
.
check_output_with_place
(
place
)
error_msg
=
str
(
cm
.
exception
).
split
(
"
\n
"
)[
-
2
].
strip
().
split
(
"."
)[
0
]
self
.
assertEqual
(
error_msg
,
...
...
@@ -290,14 +286,13 @@ class TestMaxOp_ZeroDim(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
# only composite op support gradient check of reduce_max
self
.
check_grad
(
[
'X'
],
'Out'
,
check_eager
=
True
,
check_prim
=
True
,
only_check_prim
=
True
,
)
...
...
@@ -320,14 +315,13 @@ class TestMaxOp_FP32(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
# only composite op support gradient check of reduce_max
self
.
check_grad
(
[
'X'
],
'Out'
,
check_eager
=
True
,
check_prim
=
True
,
only_check_prim
=
True
,
)
...
...
@@ -350,7 +344,7 @@ class TestMinOp(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestMinOp_ZeroDim
(
OpTest
):
...
...
@@ -366,7 +360,7 @@ class TestMinOp_ZeroDim(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestMin6DOp
(
OpTest
):
...
...
@@ -384,7 +378,7 @@ class TestMin6DOp(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestMin8DOp
(
OpTest
):
...
...
@@ -402,7 +396,7 @@ class TestMin8DOp(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
raw_reduce_prod
(
x
,
dim
=
[
0
],
keep_dim
=
False
):
...
...
@@ -423,10 +417,10 @@ class TestProdOp(OpTest):
)
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
class
TestProdOp_ZeroDim
(
OpTest
):
...
...
@@ -438,10 +432,10 @@ class TestProdOp_ZeroDim(OpTest):
self
.
attrs
=
{
'dim'
:
[],
'reduce_all'
:
True
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
class
TestProd6DOp
(
OpTest
):
...
...
@@ -463,10 +457,10 @@ class TestProd6DOp(OpTest):
)
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
class
TestProd8DOp
(
OpTest
):
...
...
@@ -490,10 +484,10 @@ class TestProd8DOp(OpTest):
)
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Out'
,
check_eager
=
True
)
self
.
check_grad
([
'X'
],
'Out'
)
class
TestAllOp
(
OpTest
):
...
...
@@ -505,7 +499,7 @@ class TestAllOp(OpTest):
self
.
attrs
=
{
'reduce_all'
:
True
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAllOp_ZeroDim
(
OpTest
):
...
...
@@ -517,7 +511,7 @@ class TestAllOp_ZeroDim(OpTest):
self
.
attrs
=
{
'dim'
:
[],
'reduce_all'
:
True
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAll8DOp
(
OpTest
):
...
...
@@ -533,7 +527,7 @@ class TestAll8DOp(OpTest):
self
.
outputs
=
{
'Out'
:
self
.
inputs
[
'X'
].
all
(
axis
=
self
.
attrs
[
'dim'
])}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAllOpWithDim
(
OpTest
):
...
...
@@ -545,7 +539,7 @@ class TestAllOpWithDim(OpTest):
self
.
outputs
=
{
'Out'
:
self
.
inputs
[
'X'
].
all
(
axis
=
self
.
attrs
[
'dim'
])}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAll8DOpWithDim
(
OpTest
):
...
...
@@ -561,7 +555,7 @@ class TestAll8DOpWithDim(OpTest):
self
.
outputs
=
{
'Out'
:
self
.
inputs
[
'X'
].
all
(
axis
=
self
.
attrs
[
'dim'
])}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAllOpWithKeepDim
(
OpTest
):
...
...
@@ -575,7 +569,7 @@ class TestAllOpWithKeepDim(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAll8DOpWithKeepDim
(
OpTest
):
...
...
@@ -595,7 +589,7 @@ class TestAll8DOpWithKeepDim(OpTest):
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAllOpError
(
unittest
.
TestCase
):
...
...
@@ -620,7 +614,7 @@ class TestAnyOp(OpTest):
self
.
attrs
=
{
'reduce_all'
:
True
}
def
test_check_output
(
self
):
self
.
check_output
(
check_eager
=
True
)
self
.
check_output
()
class
TestAnyOp_ZeroDim
(
OpTest
):
...
...
@@ -632,7 +626,7 @@ class TestAnyOp_ZeroDim(OpTest):
self
.
attrs
=
{
'dim'
:
[],
'reduce_all'
:
True
}
def
test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAny8DOp(OpTest):
...
@@ -648,7 +642,7 @@ class TestAny8DOp(OpTest):
         self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAnyOpWithDim(OpTest):
...
@@ -660,7 +654,7 @@ class TestAnyOpWithDim(OpTest):
         self.outputs = {'Out': self.inputs['X'].any(axis=1)}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAny8DOpWithDim(OpTest):
...
@@ -676,7 +670,7 @@ class TestAny8DOpWithDim(OpTest):
         self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAnyOpWithKeepDim(OpTest):
...
@@ -692,7 +686,7 @@ class TestAnyOpWithKeepDim(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAny8DOpWithKeepDim(OpTest):
...
@@ -712,7 +706,7 @@ class TestAny8DOpWithKeepDim(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestAnyOpError(unittest.TestCase):
...
@@ -821,9 +815,14 @@ class Test3DReduce3(Test1DReduce):
         }


+def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
+    return paddle._C_ops.sum(x, axis, dtype, keepdim)
+
+
 class Test8DReduce0(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper2
         self.attrs = {'dim': (4, 2, 3)}
         self.inputs = {
             'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
...
@@ -854,9 +853,26 @@ class TestKeepDimReduce(Test1DReduce):
         }


+class TestKeepDimReduceForEager(Test1DReduce):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper2
+        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
+        self.attrs = {'dim': [1], 'keep_dim': True}
+        self.outputs = {
+            'Out': self.inputs['X'].sum(
+                axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
+            )
+        }
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestKeepDim8DReduce(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper2
         self.inputs = {
             'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
         }
...
@@ -893,14 +909,13 @@ class TestReduceMaxOpMultiAxises(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         # only composite op support gradient check of reduce_max
         self.check_grad(
             ['X'],
             'Out',
-            check_eager=True,
             check_prim=True,
             only_check_prim=True,
         )
...
@@ -923,7 +938,7 @@ class TestReduceMinOpMultiAxises(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()


 class TestKeepDimReduceSumMultiAxises(OpTest):
...
@@ -947,6 +962,25 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)


+class TestKeepDimReduceSumMultiAxisesForEager(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper2
+        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
+        self.attrs = {'dim': [-2, -1], 'keep_dim': True}
+        self.outputs = {
+            'Out': self.inputs['X'].sum(
+                axis=tuple(self.attrs['dim']), keepdims=True
+            )
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestReduceSumWithDimOne(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
...
@@ -969,6 +1003,26 @@ class TestReduceSumWithDimOne(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)


+class TestReduceSumWithDimOneForEager(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper2
+        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
+        self.attrs = {'dim': [1, 2], 'keep_dim': True}
+        self.outputs = {
+            'Out': self.inputs['X'].sum(
+                axis=tuple(self.attrs['dim']), keepdims=True
+            )
+        }
+        self.enable_cinn = True
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestReduceSumWithNumelOne(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
...
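For orientation, the migrated reduce tests point `python_api` at a thin forwarding wrapper rather than at the public `paddle.sum`. A hedged eager-mode sanity check of that wrapper, with illustrative shapes; it assumes only what the diff itself shows, namely that `paddle._C_ops.sum(x, axis, dtype, keepdim)` accepts these four arguments:

import numpy as np
import paddle

def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
    # Same wrapper as in the diff: forwards straight to the C++ kernel.
    return paddle._C_ops.sum(x, axis, dtype, keepdim)

x_np = np.random.random((5, 6, 10)).astype("float64")
out = reduce_sum_wrapper2(paddle.to_tensor(x_np), axis=[1], keepdim=True)
np.testing.assert_allclose(out.numpy(), x_np.sum(axis=1, keepdims=True))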
python/paddle/fluid/tests/unittests/test_rnn_op.py
...
@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf)
 paddle.enable_static()


-def rnn_warpper(
+def rnn_wrapper(
     Input,
     PreState,
     WeightList=None,
...
@@ -76,7 +76,7 @@ class TestRNNOp(OpTest):
     def setUp(self):
         self.op_type = "rnn"
-        self.python_api = rnn_warpper
+        self.python_api = rnn_wrapper
         self.python_out_sig = ["Out", "DropoutState", "State"]
         self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]}
         self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
...
python/paddle/fluid/tests/unittests/test_roll_op.py
...
@@ -106,10 +106,10 @@ class TestRollBF16OP(TestRollOp):
         self.place = core.CUDAPlace(0)

     def test_check_output(self):
-        self.check_output_with_place(self.place, check_eager=True)
+        self.check_output_with_place(self.place)

     def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True)
+        self.check_grad_with_place(self.place, ['X'], 'Out')


 @unittest.skipIf(
...
@@ -126,10 +126,10 @@ class TestRollBF16OpCase2(TestRollOp):
         self.place = core.CUDAPlace(0)

     def test_check_output(self):
-        self.check_output_with_place(self.place, check_eager=True)
+        self.check_output_with_place(self.place)

     def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True)
+        self.check_grad_with_place(self.place, ['X'], 'Out')


 @unittest.skipIf(
...
@@ -146,10 +146,10 @@ class TestRollBF16OpCase3(TestRollOp):
         self.place = core.CUDAPlace(0)

     def test_check_output(self):
-        self.check_output_with_place(self.place, check_eager=True)
+        self.check_output_with_place(self.place)

     def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out', check_eager=True)
+        self.check_grad_with_place(self.place, ['X'], 'Out')


 class TestRollAPI(unittest.TestCase):
...
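With `check_eager` gone, `check_output` falls back to the harness's default dygraph comparison, so the eager behavior of `paddle.roll` is what these cases exercise. A quick illustrative cross-check against NumPy (values are arbitrary, not from the tests):

import numpy as np
import paddle

x_np = np.arange(12, dtype="float32").reshape(3, 4)
out = paddle.roll(paddle.to_tensor(x_np), shifts=1, axis=0)
np.testing.assert_array_equal(out.numpy(), np.roll(x_np, 1, axis=0))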
python/paddle/fluid/tests/unittests/test_scale_op.py
...
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 from paddle import fluid
...
@@ -42,10 +42,10 @@ class TestScaleOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestScaleOpScaleVariable(OpTest):
...
@@ -66,10 +66,10 @@ class TestScaleOpScaleVariable(OpTest):
         pass

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestScaleOpSelectedRows(unittest.TestCase):
...
@@ -148,10 +148,10 @@ class TestScaleFp16Op(TestScaleOp):
         self.dtype = np.float16

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=True)
+        self.check_grad(["X"], "Out")


 class TestScaleBF16Op(OpTest):
...
@@ -166,14 +166,13 @@ class TestScaleBF16Op(OpTest):
         self.outputs = {'Out': convert_float_to_uint16(out)}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(
             ['X'],
             'Out',
             numeric_grad_delta=0.8,
-            check_eager=True,
         )
...
python/paddle/fluid/tests/unittests/test_sgd_op.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle import fluid
...
@@ -25,9 +25,19 @@ from paddle.fluid.op import Operator
 paddle.enable_static()


+def sgd_wrapper(
+    param, learning_rate, grad, master_param=None, multi_precision=False
+):
+    paddle._C_ops.sgd_(
+        param, learning_rate, grad, master_param, multi_precision
+    )
+
+
 class TestSGDOp(OpTest):
     def setUp(self):
         self.op_type = "sgd"
+        self.python_api = sgd_wrapper
+        self.python_out_sig = ['Out']
         self.conf()
         w = np.random.random((self.h, self.w)).astype("float32")
         g = np.random.random((self.h, self.w)).astype("float32")
...
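The trailing underscore in `paddle._C_ops.sgd_` marks an in-place kernel: it mutates `param` and returns nothing, which is why the wrapper above has no `return` and the test declares `python_out_sig = ['Out']` so the harness knows which op output to compare. A hedged sketch of one step outside the harness (values illustrative):

import numpy as np
import paddle

param = paddle.to_tensor(np.ones((2, 3), dtype="float32"))
grad = paddle.to_tensor(np.full((2, 3), 0.5, dtype="float32"))
lr = paddle.to_tensor(np.array([0.1], dtype="float32"))

# One vanilla SGD step, applied in place: param <- param - lr * grad
paddle._C_ops.sgd_(param, lr, grad, None, False)
np.testing.assert_allclose(param.numpy(), np.full((2, 3), 0.95, dtype="float32"))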
python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py
...
@@ -21,7 +21,7 @@ import paddle
 from paddle import fluid
 from paddle.fluid import core
 from paddle.fluid.op import Operator
-from paddle.fluid.tests.unittests.op_test import (
+from paddle.fluid.tests.unittests.eager_op_test import (
     OpTest,
     OpTestTool,
     convert_float_to_uint16,
...
python/paddle/fluid/tests/unittests/test_simple_rnn_op.py
...
@@ -31,7 +31,7 @@ np.set_printoptions(threshold=np.inf)
 paddle.enable_static()


-def rnn_warpper(
+def rnn_wrapper(
     Input,
     PreState,
     WeightList=None,
...
@@ -76,7 +76,7 @@ class TestSimpleRNNOp(OpTest):
     def setUp(self):
         self.op_type = "rnn"
-        self.python_api = rnn_warpper
+        self.python_api = rnn_wrapper
         self.python_out_sig = ["Out", "DropoutState", "State"]
         self.python_out_sig_sub_name = {"State": ["last_hidden"]}
...
python/paddle/fluid/tests/unittests/test_slice_op.py
...
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
 import paddle
 from paddle import fluid
...
@@ -27,6 +27,19 @@ from paddle.tensor.manipulation import tensor_array_to_tensor
 paddle.enable_static()


+def slice_wrapper(
+    Input,
+    axes=[],
+    StartsTensor=None,
+    EndsTensor=None,
+    infer_flags=[],
+    decrease_axis=[],
+):
+    return paddle._C_ops.slice(
+        Input, axes, StartsTensor, EndsTensor, infer_flags, decrease_axis
+    )
+
+
 # Situation 1: starts(list, no tensor), ends(list, no tensor)
 # 1.1 without attr(decrease)
 class TestSliceOp(OpTest):
...
@@ -148,73 +161,12 @@ class TestSliceOp_decs_dim(OpTest):
     )


 class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
     def config(self):
         self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [1, 0, 2]
         self.ends = [2, 1, 4]
         self.axes = [0, 1, 2]
         self.decrease_axis = [0, 1]
         self.infer_flags = [1, 1, 1]
         self.out = self.input[1, 0, 2:4, :]


 class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
     def config(self):
         self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-1, 0, 2]
         self.ends = [1000000, 1, 4]
         self.axes = [0, 1, 2]
         self.decrease_axis = [0, 1]
         self.infer_flags = [1, 1, 1]
         self.out = self.input[-1, 0, 2:4, :]


 class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
     def config(self):
         self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 7]).astype("float64")
         self.starts = [0, 1, 2, 3]
         self.ends = [1, 2, 3, 4]
         self.axes = [0, 1, 2, 3]
         self.decrease_axis = [0, 1, 2, 3]
         self.infer_flags = [1, 1, 1]
         self.out = self.input[0, 1, 2, 3:4]


 class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
     def config(self):
         self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-1]
         self.ends = [1000000]
         self.axes = [3]
         self.decrease_axis = [3]
         self.infer_flags = [1, 1, 1]
         self.out = self.input[:, :, :, -1]


 # test_6 with test_2 with test_3
 class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
     def config(self):
         self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [0, 1, 2, 3]
         self.ends = [1, 2, 3, 4]
         self.axes = [0, 1, 2, 3]
         self.decrease_axis = [0, 1, 2, 3]
         self.infer_flags = [1, 1, 1]
         self.out = self.input[0, 1, 2, 3:4]


 # Situation 2: starts(list, have tensor), ends(list, no tensor)
 # without attr(decrease)
 class TestSliceOp_starts_ListTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()

         starts_tensor = []
...
@@ -254,7 +206,7 @@ class TestSliceOp_starts_ListTensor(OpTest):
 class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()

         starts_tensor = []
...
@@ -312,7 +264,7 @@ class TestSliceOp_decs_dim_5_starts_ListTensor(
 class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()
         self.inputs = {
             'Input': self.input,
...
@@ -348,7 +300,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
 class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()
         self.inputs = {
...
@@ -384,7 +336,7 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
 class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()
         self.inputs = {
             'Input': self.input,
...
@@ -421,7 +373,7 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
 class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
     def setUp(self):
         self.op_type = "slice"
-        self.python_api = paddle.slice
+        self.python_api = slice_wrapper
         self.config()

         ends_tensor = []
...
@@ -587,57 +539,64 @@ class TestBF16(OpTest):
 # Test python API
 class TestSliceAPI(unittest.TestCase):
     def test_1(self):
-        input = np.random.random([3, 4, 5, 6]).astype("float64")
-        ...
+        with paddle_static_guard():
+            input = np.random.random([3, 4, 5, 6]).astype("float64")
+            minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+            minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
+            starts = paddle.static.data(
+                name='starts', shape=[1, 3], dtype="float32"
+            )
+            starts.desc.set_need_check_feed(False)
+            ends = paddle.static.data(name='ends', shape=[3], dtype="float32")
+            ends.desc.set_need_check_feed(False)
+            x = paddle.static.data(
+                name="x",
+                shape=[3, 4, 5, 6],
+                dtype="float64",
+            )
+
+            # value_int64 is greater than 2147483647 which is the max of int32
+            value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)
+
+            out_1 = paddle.slice(
+                x,
+                axes=[0, 1, 2],
+                starts=[-3, 0, 2],
+                ends=[value_int64, 100, -1],
+            )
+            out_2 = paddle.slice(
+                x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]
+            )
+            out_3 = paddle.slice(
+                x,
+                axes=[0, 1, 3],
+                starts=[minus_3, 0, 2],
+                ends=[3, 100, minus_1],
+            )
+            out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
+
+            out_5 = x[-3:3, 0:100, 2:-1]
+            out_6 = x[minus_3:3, 0:100, :, 2:-1]
+            out_7 = x[minus_1, 0:100, :, 2:minus_1]
+
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
+                fluid.default_main_program(),
+                feed={
+                    "x": input,
+                    'starts': np.array([-3, 0, 2]).astype("int32"),
+                    'ends': np.array([3, 100, -1]).astype("int32"),
+                },
+                fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
+            )
+
+            assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
+            assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
+            assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
+            assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
+            assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
+            assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
+            assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])


 class TestSliceApiWithTensor(unittest.TestCase):
...
@@ -718,60 +677,61 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
         self.exe = fluid.Executor(self.place)

     def set_program_and_run(self, main_program, case_num):
-        with fluid.program_guard(main_program):
-            ...
+        with paddle_static_guard():
+            with fluid.program_guard(main_program):
+                x = [
+                    paddle.static.data(
+                        name='x0', shape=self.shape, dtype="float32"
+                    ),
+                    paddle.static.data(
+                        name='x1', shape=self.shape, dtype="float32"
+                    ),
+                    paddle.static.data(
+                        name='x2', shape=self.shape, dtype="float32"
+                    ),
+                ]
+
+                for each_x in x:
+                    each_x.stop_gradient = False
+
+                arr = paddle.tensor.create_array(dtype="float32")
+                for i in range(3):
+                    idx = paddle.tensor.array_length(arr)
+                    arr = paddle.tensor.array_write(x=x[i], i=idx, array=arr)
+
+                if case_num == 1:
+                    self.sliced_arr = output = arr[0]
+
+                elif case_num == 2:
+                    end = (
+                        paddle.tensor.array_length(arr) - 1
+                    )  # dtype of end is int64
+                    self.sliced_arr = slice_arr = arr[self.start : end]
+                    output, _ = tensor_array_to_tensor(
+                        slice_arr, axis=self.axis, use_stack=True
+                    )
+                elif case_num == 3:
+                    value_int64 = paddle.tensor.fill_constant(
+                        [1], "int64", 2147483648
+                    )
+                    self.sliced_arr = slice_arr = arr[
+                        self.start : value_int64
+                    ]
+                    output, _ = tensor_array_to_tensor(
+                        slice_arr, axis=self.axis, use_stack=True
+                    )
+
+                loss = paddle.sum(output)
+                fluid.backward.append_backward(loss)
+                g_vars = list(
+                    map(
+                        main_program.global_block().var,
+                        [each_x.name + "@GRAD" for each_x in x],
+                    )
+                )
+                self.out, self.g_x0, self.g_x1, self.g_x2 = self.exe.run(
+                    main_program,
+                    feed={'x0': self.data, 'x1': self.data, 'x2': self.data},
+                    fetch_list=[output] + g_vars,
+                )

     def test_case_1(self):
         main_program = fluid.Program()
...
@@ -785,35 +745,37 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase):
         np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))

     def test_case_2(self):
-        main_program = fluid.Program()
-        self.set_program_and_run(main_program, 2)
+        with paddle_static_guard():
+            main_program = fluid.Program()
+            self.set_program_and_run(main_program, 2)

-        self.assertTrue(
-            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
-        )
-        self.assertEqual(self.sliced_arr.shape, self.shape)
-        np.testing.assert_array_equal(
-            self.out, np.stack([self.data, self.data], axis=self.axis)
-        )
-        np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
-        np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
-        np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))
+            self.assertTrue(
+                self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
+            )
+            self.assertEqual(self.sliced_arr.shape, self.shape)
+            np.testing.assert_array_equal(
+                self.out, np.stack([self.data, self.data], axis=self.axis)
+            )
+            np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
+            np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
+            np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))

     def test_case_3(self):
-        main_program = fluid.Program()
-        self.set_program_and_run(main_program, 3)
+        with paddle_static_guard():
+            main_program = fluid.Program()
+            self.set_program_and_run(main_program, 3)

-        self.assertTrue(
-            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
-        )
-        self.assertEqual(self.sliced_arr.shape, self.shape)
-        np.testing.assert_array_equal(
-            self.out,
-            np.stack([self.data, self.data, self.data], axis=self.axis),
-        )
-        np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
-        np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
-        np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data))
+            self.assertTrue(
+                self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
+            )
+            self.assertEqual(self.sliced_arr.shape, self.shape)
+            np.testing.assert_array_equal(
+                self.out,
+                np.stack([self.data, self.data, self.data], axis=self.axis),
+            )
+            np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
+            np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
+            np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data))


 class TestImperativeVarBaseGetItem(unittest.TestCase):
...
@@ -964,12 +926,12 @@ class TestSliceDoubleGradCheck(unittest.TestCase):
         )

     def test_grad(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
+        with paddle_static_guard():
+            places = [fluid.CPUPlace()]
+            if core.is_compiled_with_cuda():
+                places.append(fluid.CUDAPlace(0))
+            for p in places:
+                self.func(p)


 class TestSliceTripleGradCheck(unittest.TestCase):
...
@@ -999,12 +961,12 @@ class TestSliceTripleGradCheck(unittest.TestCase):
         )

     def test_grad(self):
-        paddle.enable_static()
-        places = [fluid.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for p in places:
-            self.func(p)
+        with paddle_static_guard():
+            places = [fluid.CPUPlace()]
+            if core.is_compiled_with_cuda():
+                places.append(fluid.CUDAPlace(0))
+            for p in places:
+                self.func(p)


 if __name__ == '__main__':
...
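The recurring pattern in this file: static-graph API tests now run inside `paddle_static_guard()` instead of paired `paddle.enable_static()` / `paddle.disable_static()` calls. The real helper lives in `eager_op_test`; the sketch below shows only its assumed behavior (enter static mode, always restore dynamic mode, even when the body raises), not its actual implementation:

import contextlib
import paddle

@contextlib.contextmanager
def paddle_static_guard():
    # Assumed behavior of the eager_op_test helper.
    try:
        paddle.enable_static()
        yield
    finally:
        paddle.disable_static()

# Usage mirroring the rewritten tests:
with paddle_static_guard():
    x = paddle.static.data(name="x", shape=[3, 4], dtype="float32")
    out = paddle.slice(x, axes=[0], starts=[0], ends=[2])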
python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle import fluid, incubate
...
@@ -42,6 +42,7 @@ def _get_softmax(x, mask, fp16=True):
 class TestSoftmaxMaskFuseOp(OpTest):
     def setUp(self):
         self.op_type = "fused_softmax_mask"
+        self.python_api = paddle.incubate.softmax_mask_fuse
         x = np.random.random((1, 1, 8, 32))
         mask = np.random.randint(0, 2, (1, 1, 8, 32))
         mask_input = np.where(mask == 1, -10000.0, mask)
...
@@ -68,6 +69,7 @@ class TestSoftmaxMaskFuseOp(OpTest):
 class TestSoftmaxMaskFuseOp0(OpTest):
     def setUp(self):
         self.op_type = "fused_softmax_mask"
+        self.python_api = paddle.incubate.softmax_mask_fuse
         x = np.random.random((1, 1, 8, 32)).astype("float16")
         mask = np.random.randint(0, 2, (1, 1, 8, 32)).astype("float16")
         mask_input = np.where(mask == 1, -10000.0, mask)
...
python/paddle/fluid/tests/unittests/test_stft_op.py
...
@@ -15,8 +15,8 @@
 import unittest
 import numpy as np
+from eager_op_test import OpTest
 from numpy.lib.stride_tricks import as_strided
-from op_test import OpTest
 import paddle
...
@@ -80,12 +80,12 @@ class TestStftOp(OpTest):
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output(check_eager=True)
+        self.check_output()
         paddle.disable_static()

     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_dygraph=False)
         paddle.disable_static()
...
python/paddle/fluid/tests/unittests/test_strided_slice_op.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle import fluid
...
@@ -96,10 +96,10 @@ class TestStrideSliceOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad({'Input'}, 'Out', check_eager=True)
+        self.check_grad({'Input'}, 'Out')

     def initTestCase(self):
         self.input = np.random.rand(100)
...
@@ -318,6 +318,7 @@ class TestStrideSliceOpBool6D(TestStrideSliceOpBool):
 class TestStridedSliceOp_starts_ListTensor(OpTest):
     def setUp(self):
         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.config()

         starts_tensor = []
...
@@ -359,6 +360,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest):
 class TestStridedSliceOp_ends_ListTensor(OpTest):
     def setUp(self):
         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.config()

         ends_tensor = []
...
@@ -400,6 +402,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest):
 class TestStridedSliceOp_starts_Tensor(OpTest):
     def setUp(self):
         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.config()
         self.inputs = {
             'Input': self.input,
...
@@ -435,6 +438,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest):
 class TestStridedSliceOp_ends_Tensor(OpTest):
     def setUp(self):
         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.config()
         self.inputs = {
             'Input': self.input,
...
@@ -476,6 +480,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
                 ("x" + str(index), np.ones((1)).astype('int32') * ele)
             )

         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.inputs = {
             'Input': self.input,
...
@@ -512,6 +517,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
 class TestStridedSliceOp_strides_Tensor(OpTest):
     def setUp(self):
         self.op_type = "strided_slice"
+        self.python_api = paddle.strided_slice
         self.config()
         self.inputs = {
             'Input': self.input,
...
@@ -551,7 +557,7 @@ class TestStridedSliceAPI(unittest.TestCase):
         minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
         minus_3 = paddle.tensor.fill_constant([1], "int32", -3)
         starts = paddle.static.data(name='starts', shape=[3], dtype='int32')
-        ends = paddle.static.data(name='ends', shape=[3], dtype='int32')
+        ends = paddle.static.data(name='ends', shape=[3], dtype='int64')
         strides = paddle.static.data(name='strides', shape=[3], dtype='int32')
         x = paddle.static.data(
...
@@ -971,6 +977,7 @@ class TestStridedSliceTensorArray(unittest.TestCase):
 class TestStridedSliceFloat16(unittest.TestCase):
     def init_test_case(self):
         self.op_type = 'strided_slice'
+        self.python_api = paddle.strided_slice
         self.input_shape = [3, 3, 3, 6, 7, 8]
         self.axes = [0, 1, 2, 3, 4, 5]
         self.starts = [1, 0, 0, 0, 1, 2]
...
python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py
...
@@ -24,7 +24,7 @@ import unittest
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest, _set_use_system_allocator
+from eager_op_test import OpTest, _set_use_system_allocator
 import paddle
 from paddle import fluid, nn
...
python/paddle/fluid/tests/unittests/test_temporal_shift_op.py
...
@@ -41,12 +41,16 @@ def temporal_shift(x, seg_num, shift_ratio, data_format):
     return out


+def wrapper_temporal_shift(x, seg_num, shift_ratio=0.25, data_format="NCHW"):
+    return paddle._C_ops.temporal_shift(x, seg_num, shift_ratio, data_format)
+
+
 class TestTemporalShift(OpTest):
     def setUp(self):
         self.initTestCase()
         self.init_dtype()
         self.op_type = 'temporal_shift'
-        self.python_api = paddle.nn.functional.temporal_shift
+        self.python_api = wrapper_temporal_shift
         x = np.random.random(self.x_shape).astype(self.dtype)

         self.attrs = {
...
@@ -198,7 +202,7 @@ class TestTemporalShiftBF16(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'temporal_shift'
-        self.python_api = paddle.nn.functional.temporal_shift
+        self.python_api = wrapper_temporal_shift
         x = np.random.random(self.x_shape).astype(np.float32)
...
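Here the public `paddle.nn.functional.temporal_shift` is swapped out as `python_api` for a wrapper over the raw op; a plausible reason (the commit itself does not say) is that the functional API's extra `name` keyword does not line up with the op attrs the harness forwards positionally. In eager mode the two paths should agree, which this hedged check illustrates (shapes arbitrary; `seg_num` must divide the batch):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((4, 3, 8, 8)).astype("float32"))
out_api = paddle.nn.functional.temporal_shift(x, seg_num=2, shift_ratio=0.25)
out_raw = paddle._C_ops.temporal_shift(x, 2, 0.25, "NCHW")
np.testing.assert_allclose(out_api.numpy(), out_raw.numpy())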
python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py
...
@@ -15,7 +15,11 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+)
 import paddle
 from paddle.fluid import core
...
@@ -33,7 +37,7 @@ class TestTransferDtypeOpFp32ToFp64(OpTest):
         self.op_type = 'transfer_dtype'

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestTransferDtypeOpFp16ToFp32(OpTest):
...
@@ -48,7 +52,7 @@ class TestTransferDtypeOpFp16ToFp32(OpTest):
         self.op_type = 'transfer_dtype'

     def test_check_output(self):
-        self.check_output(atol=1e-3)
+        self.check_output(atol=1e-3, check_dygraph=False)


 class TestTransferDtypeOpFp32ToFp16(OpTest):
...
@@ -63,7 +67,7 @@ class TestTransferDtypeOpFp32ToFp16(OpTest):
         self.op_type = 'transfer_dtype'

     def test_check_output(self):
-        self.check_output(atol=1e-3)
+        self.check_output(atol=1e-3, check_dygraph=False)


 class TestTransferDtypeOpBf16ToFp32(OpTest):
...
@@ -78,7 +82,7 @@ class TestTransferDtypeOpBf16ToFp32(OpTest):
         self.op_type = 'transfer_dtype'

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 class TestTransferDtypeFp32ToBf16(OpTest):
...
@@ -93,7 +97,7 @@ class TestTransferDtypeFp32ToBf16(OpTest):
         self.op_type = 'transfer_dtype'

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == '__main__':
...
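`transfer_dtype` is an executor-internal op with, presumably, no eager `python_api` to compare against, so the migrated tests opt out of the dygraph check explicitly rather than rely on the old default. A hedged sketch of the full skeleton implied by the hunks above (the attrs follow the usual cast-style in/out dtype convention; treat the exact attr names and enum values as an assumption):

import numpy as np
from eager_op_test import OpTest
from paddle.fluid import core


class TestTransferDtypeFp32ToFp64Sketch(OpTest):  # hypothetical restatement
    def setUp(self):
        ipt = np.random.random(size=[10, 10])
        self.inputs = {'X': ipt.astype('float32')}
        self.outputs = {'Out': ipt.astype('float64')}
        self.attrs = {
            'out_dtype': int(core.VarDesc.VarType.FP64),  # assumed attr names
            'in_dtype': int(core.VarDesc.VarType.FP32),
        }
        self.op_type = 'transfer_dtype'

    def test_check_output(self):
        # no dygraph counterpart to compare against, so skip that path
        self.check_output(check_dygraph=False)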
python/paddle/fluid/tests/unittests/test_unique.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle.fluid import core
...
@@ -27,9 +27,9 @@ class TestUniqueOp(OpTest):
         self.init_config()

     def test_check_output(self):
-        paddle.enable_static()
-        self.check_output()
-        paddle.disable_static()
+        self.check_output(
+            check_dygraph=False
+        )  # unique return sorted data in dygraph

     def init_config(self):
         self.inputs = {
...
@@ -73,19 +73,20 @@ class TestRandom(TestUniqueOp):
 class TestUniqueRaiseError(unittest.TestCase):
     def test_errors(self):
-        paddle.enable_static()
+        with paddle_static_guard():

-        def test_type():
-            paddle.unique([10])
+            def test_type():
+                paddle.unique([10])

-        self.assertRaises(TypeError, test_type)
+            self.assertRaises(TypeError, test_type)

-        def test_dtype():
-            data = paddle.static.data(shape=[10], dtype="float16", name="input")
-            paddle.unique(data)
+            def test_dtype():
+                data = paddle.static.data(
+                    shape=[10], dtype="float16", name="input"
+                )
+                paddle.unique(data)

-        self.assertRaises(TypeError, test_dtype)
-        paddle.disable_static()
+            self.assertRaises(TypeError, test_dtype)


 @unittest.skipIf(
...
@@ -104,10 +105,10 @@ class TestOneGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
-            paddle.enable_static()
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5)
-            paddle.disable_static()
+            self.check_output_with_place(
+                place, atol=1e-5, check_dygraph=False
+            )  # unique return sorted data in dygraph


 @unittest.skipIf(
...
@@ -131,10 +132,10 @@ class TestRandomGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
-            paddle.enable_static()
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=1e-5)
-            paddle.disable_static()
+            self.check_output_with_place(
+                place, atol=1e-5, check_dygraph=False
+            )  # unique return sorted data in dygraph


 class TestSortedUniqueOp(TestUniqueOp):
...
@@ -243,6 +244,7 @@ class TestUniqueOpAxis1(TestUniqueOp):
 class TestUniqueAPI(unittest.TestCase):
     def test_dygraph_api_out(self):
+        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out = paddle.unique(x)
...
@@ -250,6 +252,7 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((out.numpy() == expected_out).all(), True)

     def test_dygraph_api_attr(self):
+        paddle.disable_static()
         x_data = np.random.random((3, 5, 5)).astype("float32")
         x = paddle.to_tensor(x_data)
         out, index, inverse, counts = paddle.unique(
...
@@ -272,6 +275,7 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((counts.numpy() == np_counts).all(), True)

     def test_dygraph_attr_dtype(self):
+        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out, indices, inverse, counts = paddle.unique(
...
@@ -290,69 +294,62 @@ class TestUniqueAPI(unittest.TestCase):
     def test_static_graph(self):
-        paddle.enable_static()
-        with paddle.static.program_guard(
-            paddle.static.Program(), paddle.static.Program()
-        ):
-            x = paddle.static.data(name='x', shape=[3, 2], dtype='float64')
-            unique, inverse, counts = paddle.unique(
-                x, return_inverse=True, return_counts=True, axis=0
-            )
-            place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64')
-            result = exe.run(
-                feed={"x": x_np}, fetch_list=[unique, inverse, counts]
-            )
-            np_unique, np_inverse, np_counts = np.unique(
-                x_np, return_inverse=True, return_counts=True, axis=0
-            )
-            np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
-            np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
-            np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
-        paddle.disable_static()
+        with paddle_static_guard():
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x = paddle.static.data(name='x', shape=[3, 2], dtype='float64')
+                unique, inverse, counts = paddle.unique(
+                    x, return_inverse=True, return_counts=True, axis=0
+                )
+                place = paddle.CPUPlace()
+                exe = paddle.static.Executor(place)
+                x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64')
+                result = exe.run(
+                    feed={"x": x_np}, fetch_list=[unique, inverse, counts]
+                )


 class TestUniqueError(unittest.TestCase):
     def test_input_dtype(self):
         def test_x_dtype():
-            with paddle.static.program_guard(
-                paddle.static.Program(), paddle.static.Program()
-            ):
-                x = paddle.static.data(name='x', shape=[10, 10], dtype='float16')
-                result = paddle.unique(x)
+            with paddle_static_guard():
+                with paddle.static.program_guard(
+                    paddle.static.Program(), paddle.static.Program()
+                ):
+                    x = paddle.static.data(
+                        name='x', shape=[10, 10], dtype='float16'
+                    )
+                    result = paddle.unique(x)

         self.assertRaises(TypeError, test_x_dtype)

     def test_attr(self):
-        paddle.enable_static()
-        x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
+        with paddle_static_guard():
+            x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')

-        def test_return_index():
-            result = paddle.unique(x, return_index=0)
+            def test_return_index():
+                result = paddle.unique(x, return_index=0)

-        self.assertRaises(TypeError, test_return_index)
+            self.assertRaises(TypeError, test_return_index)

-        def test_return_inverse():
-            result = paddle.unique(x, return_inverse='s')
+            def test_return_inverse():
+                result = paddle.unique(x, return_inverse='s')

-        self.assertRaises(TypeError, test_return_inverse)
+            self.assertRaises(TypeError, test_return_inverse)

-        def test_return_counts():
-            result = paddle.unique(x, return_counts=3)
+            def test_return_counts():
+                result = paddle.unique(x, return_counts=3)

-        self.assertRaises(TypeError, test_return_counts)
+            self.assertRaises(TypeError, test_return_counts)

-        def test_axis():
-            result = paddle.unique(x, axis='12')
+            def test_axis():
+                result = paddle.unique(x, axis='12')

-        def test_dtype():
-            result = paddle.unique(x, dtype='float64')
+            def test_dtype():
+                result = paddle.unique(x, dtype='float64')

-        self.assertRaises(TypeError, test_axis)
-        paddle.disable_static()
+            self.assertRaises(TypeError, test_axis)


 if __name__ == "__main__":
...
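The recurring comment explains the new `check_dygraph=False`: in dygraph, `paddle.unique` returns its values in ascending order, while the legacy static op keeps its own ordering, so a bitwise comparison between the two paths is not meaningful. Illustrative:

import paddle

x = paddle.to_tensor([3, 1, 2, 1, 3])
print(paddle.unique(x).numpy())  # [1 2 3]: ascending, regardless of input order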
python/paddle/fluid/tests/unittests/test_unique_with_counts.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle.fluid import core
...
@@ -81,16 +81,15 @@ class TestRandom(TestUniqueWithCountsOp):
 class TestUniqueWithCountsRaiseError(unittest.TestCase):
     def test_errors(self):
-        def test_type():
-            paddle.unique([10])
+        with paddle_static_guard():

-        self.assertRaises(TypeError, test_type)
+            def test_type():
+                paddle.unique([10])

-        def test_dtype():
-            data = paddle.static.data(shape=[10], dtype="float16", name="input")
-            paddle.unique(data)
+            self.assertRaises(TypeError, test_type)

-        self.assertRaises(TypeError, test_dtype)
+            def test_dtype():
+                data = paddle.static.data(
+                    shape=[10], dtype="float16", name="input"
+                )
+                paddle.unique(data)
+
+            self.assertRaises(TypeError, test_dtype)


 @unittest.skipIf(
...
python/paddle/fluid/tests/unittests/test_warpctc_op.py
...
@@ -16,15 +16,13 @@ import sys
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_softmax_op import stable_softmax
 import paddle
 import paddle.nn.functional as F
 from paddle.fluid import Program, core, program_guard

-paddle.enable_static()
-
 CUDA_BLOCK_SIZE = 32
...
@@ -206,6 +204,19 @@ class CTCForward:
         return self.loss


+def warpctc_wrapper(
+    Logits,
+    Label,
+    LogitsLength=None,
+    LabelLength=None,
+    blank=0,
+    norm_by_times=False,
+):
+    return paddle._C_ops.warpctc(
+        Logits, Label, LogitsLength, LabelLength, blank, norm_by_times
+    )
+
+
 class TestWarpCTCOp(OpTest):
     def config(self):
         self.batch_size = 4
...
@@ -217,6 +228,8 @@ class TestWarpCTCOp(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
+        self.python_out_sig = ["Loss"]
         self.config()
         logits = np.random.uniform(
...
@@ -304,6 +317,7 @@ class TestWarpCTCOpWithPadding(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
         self.python_out_sig = ["Loss"]
         self.config()
...
@@ -380,7 +394,7 @@ class TestWarpCTCOpWithPadding(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
...
@@ -425,6 +439,7 @@ class TestWarpCTCOpFp64(OpTest):
     def setUp(self):
         self.op_type = "warpctc"
+        self.python_api = warpctc_wrapper
         self.python_out_sig = ["Loss"]
         self.config()
...
@@ -501,11 +516,11 @@ class TestWarpCTCOpFp64(OpTest):
         }

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss", check_eager=False)
+        self.check_grad(["Logits"], "Loss")


 class TestWarpCTCOpError(unittest.TestCase):
...
python/paddle/fluid/tests/unittests/test_where_op.py
...
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
 from paddle import fluid
...
@@ -32,10 +32,10 @@ class TestWhereOp(OpTest):
         self.outputs = {'Out': np.where(self.cond, self.x, self.y)}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Y'], 'Out')

     def init_config(self):
         self.x = np.random.uniform((-3), 5, 100).astype('float64')
...
@@ -80,12 +80,12 @@ class TestWhereBF16OP(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_eager=False)
+        self.check_output_with_place(place)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X', 'Y'], 'Out', check_eager=False, numeric_grad_delta=0.05
+            place, ['X', 'Y'], 'Out', numeric_grad_delta=0.05
         )

     def init_config(self):
...
python/paddle/fluid/tests/unittests/test_yolo_box_op.py
...
@@ -93,11 +93,37 @@ def YoloBox(x, img_size, attrs):
     return (pred_box, pred_score.reshape((n, (-1), class_num)))


+def yolo_box_wrapper(
+    X,
+    ImgSize,
+    anchors=[],
+    class_num=0,
+    conf_thresh=0.01,
+    downsample_ratio=32,
+    clip_bbox=True,
+    scale_x_y=1.0,
+    iou_aware=False,
+    iou_aware_factor=0.5,
+):
+    return paddle._C_ops.yolo_box(
+        X,
+        ImgSize,
+        anchors,
+        class_num,
+        conf_thresh,
+        downsample_ratio,
+        clip_bbox,
+        scale_x_y,
+        iou_aware,
+        iou_aware_factor,
+    )
+
+
 class TestYoloBoxOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'yolo_box'
-        self.python_api = paddle.vision.ops.yolo_box
+        self.python_api = yolo_box_wrapper
         x = np.random.random(self.x_shape).astype('float32')
         img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32')
         self.attrs = {
...
python/paddle/tensor/manipulation.py
...
@@ -3867,7 +3867,10 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
     def check_list_elements_dtype(list_input, input_name):
         if isinstance(list_input, Variable):
             check_dtype(
-                list_input.dtype, input_name, ['int32'], 'strided_slice'
+                list_input.dtype,
+                input_name,
+                ['int32', 'int64'],
+                'strided_slice',
             )
         else:
             for i, var in enumerate(list_input):
...
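This is the library-side change the `TestStridedSliceAPI` hunk above relies on: an `ends` tensor declared as `int64` now passes the static-graph dtype check instead of being rejected. A quick illustrative call (run in eager mode for brevity; the widened check itself only fires on the static path):

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(24, dtype="float32").reshape(2, 3, 4))
ends = paddle.to_tensor(np.array([2, 3, 4], dtype="int64"))  # int64 is now accepted
out = paddle.strided_slice(
    x, axes=[0, 1, 2], starts=[0, 0, 0], ends=ends, strides=[1, 1, 1]
)
print(out.shape)  # [2, 3, 4]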