PaddlePaddle / Paddle
Commit 733d3109, authored March 16, 2022 by phlrain
update; test=develop
Parent: 64bc9079
Showing 53 changed files with 502 additions and 249 deletions (+502 -249).
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py  +14 -5
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py  +1 -1
paddle/fluid/pybind/eager_utils.cc  +1 -1
paddle/phi/kernels/cpu/accuracy_kernel.cc  +4 -1
paddle/phi/kernels/cpu/put_along_axis_kernel.cc  +3 -1
paddle/phi/kernels/cpu/take_along_axis_kernel.cc  +3 -1
paddle/phi/kernels/gpu/accuracy_kernel.cu  +4 -1
paddle/phi/kernels/gpu/put_along_axis_kernel.cu  +3 -1
paddle/phi/kernels/gpu/take_along_axis_kernel.cu  +3 -1
paddle/phi/kernels/norm_grad_kernel.h  +1 -1
python/paddle/fluid/layers/control_flow.py  +3 -1
python/paddle/fluid/layers/layer_function_generator.py  +3 -0
python/paddle/fluid/layers/loss.py  +4 -0
python/paddle/fluid/layers/nn.py  +15 -3
python/paddle/fluid/layers/ops.py  +6 -1
python/paddle/fluid/tests/unittests/op_test.py  +8 -9
python/paddle/fluid/tests/unittests/test_accuracy_op.py  +14 -2
python/paddle/fluid/tests/unittests/test_activation_op.py  +30 -13
python/paddle/fluid/tests/unittests/test_bernoulli_op.py  +22 -0
python/paddle/fluid/tests/unittests/test_bincount_op.py  +9 -0
python/paddle/fluid/tests/unittests/test_bitwise_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py  +129 -126
python/paddle/fluid/tests/unittests/test_cumsum_op.py  +8 -2
python/paddle/fluid/tests/unittests/test_eigh_op.py  +4 -2
python/paddle/fluid/tests/unittests/test_erf_op.py  +4 -2
python/paddle/fluid/tests/unittests/test_erfinv_op.py  +5 -3
python/paddle/fluid/tests/unittests/test_increment.py  +10 -0
python/paddle/fluid/tests/unittests/test_is_empty_op.py  +3 -1
python/paddle/fluid/tests/unittests/test_log_loss_op.py  +6 -2
python/paddle/fluid/tests/unittests/test_matrix_power_op.py  +7 -2
python/paddle/fluid/tests/unittests/test_multinomial_op.py  +35 -0
python/paddle/fluid/tests/unittests/test_segment_ops.py  +10 -3
python/paddle/fluid/tests/unittests/test_selu_op.py  +11 -0
python/paddle/fluid/tests/unittests/test_shard_index_op.py  +7 -4
python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py  +23 -14
python/paddle/fluid/tests/unittests/test_size_op.py  +2 -1
python/paddle/fluid/tests/unittests/test_softmax_op.py  +2 -2
python/paddle/fluid/tests/unittests/test_take_along_axis_op.py  +3 -2
python/paddle/fluid/tests/unittests/test_tile_op.py  +11 -5
python/paddle/fluid/tests/unittests/test_trace_op.py  +3 -2
python/paddle/fluid/tests/unittests/test_unfold_op.py  +1 -0
python/paddle/fluid/tests/unittests/test_where_index.py  +4 -1
python/paddle/incubate/tensor/math.py  +10 -0
python/paddle/metric/metrics.py  +2 -2
python/paddle/nn/functional/activation.py  +3 -1
python/paddle/tensor/linalg.py  +6 -0
python/paddle/tensor/logic.py  +2 -3
python/paddle/tensor/manipulation.py  +11 -0
python/paddle/tensor/math.py  +12 -0
python/paddle/tensor/random.py  +5 -0
python/paddle/tensor/search.py  +4 -0
python/paddle/utils/code_gen/api.yaml  +6 -24
python/paddle/utils/code_gen/backward.yaml  +1 -1
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py

@@ -687,6 +687,7 @@ def GenerateNodeCreationCodes(
     pass_stop_gradient_args_list = ["false"]
     num_fwd_outputs = len(forward_outputs_position_map.keys())
     for name, (rtype, pos) in forward_outputs_position_map.items():
+        print("@@@@", fwd_api_name, name, rtype, pos)
         output_autograd_meta_name = GetAutoGradMetaName(name)
         output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
         if num_fwd_outputs == 1:

@@ -727,19 +728,27 @@ def GenerateNodeCreationCodes(
     # SetTensorWrappers
     set_tensor_wrappers_list = []
+    fwd_api_input_num = 0
     for name, (atype, is_fwd_input, pos) in backward_fwd_input_map.items():
         is_optional = (name in optional_inputs)
         if is_fwd_input:
+            fwd_api_input_num += 1
             if is_optional:
                 set_tensor_wrappers = f"  if({name}.is_initialized()) grad_node->SetTensorWrapper{name}({name}, true);"
             else:
                 set_tensor_wrappers = f"  grad_node->SetTensorWrapper{name}({name}, true);"
         else:
-            if IsVectorTensorType(atype):
-                tw_name = f"api_result[{pos}]"
+            print("!!!!", fwd_api_name, name, atype, pos)
+            if num_fwd_outputs == 1:
+                if IsVectorTensorType(atype):
+                    tw_name = f"std::get<{pos}>(api_result)"
+                else:
+                    tw_name = f"api_result"
             else:
-                tw_name = f"api_result"
+                assert IsPlainTensorType(atype), atype
+                out_pos = pos - fwd_api_input_num
+                tw_name = f"std::get<{out_pos}>(api_result)"

             if is_optional:
                 set_tensor_wrappers = f"  if({tw_name}.is_initialized()) grad_node->SetTensorWrapper{name}({tw_name}, false);"

@@ -779,7 +788,7 @@ def GenerateNodeCreationCodes(
         if num_outputs == 1:
             set_retain_grad = f"  egr::EagerUtils::CheckAndRetainGrad(api_result);"
         else:
-            set_retain_grad = f"  egr::EagerUtils::CheckAndRetainGrad(api_result[{pos}]);"
+            set_retain_grad = f"  egr::EagerUtils::CheckAndRetainGrad(std::get<{pos}>(api_result));"
         set_retain_grad_list.append(set_retain_grad)
     set_out_rank_str = "\n".join(set_out_rank_list)
     set_history_str = "\n".join(set_history_list)

@@ -902,7 +911,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
             returns_list[0] = f"api_result"
         else:
             # Tuple api_result
-            returns_list[pos] = f"api_result[{pos}]"
+            returns_list[pos] = f"std::get<{pos}>(api_result)"
         if IsPlainTensorType(rtype):
             returns_type_list[pos] = "paddle::experimental::Tensor"
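The substantive change here: multi-output forward results are now generated as a std::tuple, so every generated accessor moves from api_result[pos] to std::get<pos>(api_result), with fwd_api_input_num tracking how many tensor-wrapper slots come from inputs rather than outputs. A minimal sketch (my own illustration, not the generator's code) of the accessor rule:

# Sketch of the accessor choice the generator now emits; names are illustrative.
def make_accessor(num_fwd_outputs: int, pos: int) -> str:
    if num_fwd_outputs == 1:
        return "api_result"                     # single output: use the result directly
    return f"std::get<{pos}>(api_result)"       # tuple output: positional std::get

assert make_accessor(1, 0) == "api_result"
assert make_accessor(3, 2) == "std::get<2>(api_result)"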
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py

@@ -22,7 +22,7 @@ atype_to_parsing_function = {
     "bool": "CastPyArg2Boolean",
     "int": "CastPyArg2Int",
     "long": "CastPyArg2Long",
-    "std::string": "CastPyArgs2String",
+    "std::string": "CastPyArg2String",
     "int64_t": "CastPyArg2Long",
     "float": "CastPyArg2Float",
     "string": "CastPyArg2String",
paddle/fluid/pybind/eager_utils.cc

@@ -825,7 +825,7 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  if (type_name == "list") {
+  if (type_name == "list" || type_name == "tuple") {
     std::vector<int> value = CastPyArg2Ints(obj, op_type, arg_pos);
     return paddle::experimental::ScalarArray(value);
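With the check widened from list-only to list-or-tuple, any argument parsed as a ScalarArray accepts either sequence type. A hedged usage sketch; that paddle.reshape's shape argument routes through this cast in eager mode is my assumption, chosen because shape-like attributes are typical ScalarArray consumers:

import paddle

# list and tuple should now take the same parsing path (assumption: reshape's
# shape attribute is parsed via CastPyArg2ScalarArray on the eager path)
x = paddle.ones([2, 6])
a = paddle.reshape(x, [3, 4])   # list: accepted before this change
b = paddle.reshape(x, (3, 4))   # tuple: accepted after it
assert a.shape == b.shape == [3, 4]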
paddle/phi/kernels/cpu/accuracy_kernel.cc

@@ -69,4 +69,7 @@ void AccuracyRawKernel(const Context& dev_ctx,
 // TODO(add supported dtype.)
-PD_REGISTER_KERNEL(
-    accuracy, CPU, ALL_LAYOUT, phi::AccuracyRawKernel, float, double) {}
+PD_REGISTER_KERNEL(
+    accuracy, CPU, ALL_LAYOUT, phi::AccuracyRawKernel, float, double) {
+  kernel->InputAt(1).SetDataType(phi::DataType::INT64);
+  kernel->InputAt(2).SetDataType(phi::DataType::INT64);
+}
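The non-empty registration body pins the kernel's second and third inputs (indices and label) to INT64 while the scored input keeps the registered float dtype. A minimal usage sketch of the Python-level op this serves (mirroring the accuracy test further down):

import numpy as np
import paddle

pred = paddle.to_tensor(
    [[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]], dtype='float32')
label = paddle.to_tensor([[1], [0]], dtype='int64')  # label must be int64
acc = paddle.metric.accuracy(input=pred, label=label, k=1)
assert np.isclose(acc.numpy()[0], 1.0)  # both argmax predictions match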
paddle/phi/kernels/cpu/put_along_axis_kernel.cc

@@ -84,4 +84,6 @@ PD_REGISTER_KERNEL(put_along_axis,
                    double,
                    int,
                    uint8_t,
-                   int64_t) {}
+                   int64_t) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
paddle/phi/kernels/cpu/take_along_axis_kernel.cc

@@ -57,4 +57,6 @@ PD_REGISTER_KERNEL(take_along_axis,
                    double,
                    int,
                    uint8_t,
-                   int64_t) {}
+                   int64_t) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
paddle/phi/kernels/gpu/accuracy_kernel.cu

@@ -114,4 +114,7 @@ PD_REGISTER_KERNEL(accuracy,
                    phi::AccuracyRawKernel,
                    phi::dtype::float16,
                    float,
-                   double) {}
+                   double) {
+  kernel->InputAt(1).SetDataType(phi::DataType::INT64);
+  kernel->InputAt(2).SetDataType(phi::DataType::INT64);
+}
paddle/phi/kernels/gpu/put_along_axis_kernel.cu

@@ -83,4 +83,6 @@ PD_REGISTER_KERNEL(put_along_axis,
                    double,
                    int64_t,
                    int,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
paddle/phi/kernels/gpu/take_along_axis_kernel.cu

@@ -57,4 +57,6 @@ PD_REGISTER_KERNEL(take_along_axis,
                    double,
                    int64_t,
                    int,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
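The GPU registrations mirror the CPU ones: input 1, the index tensor of put/take_along_axis, is marked ALL_DTYPE so it keeps its integer dtype rather than being coerced to the value dtype. A small usage sketch:

import numpy as np
import paddle

x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])
idx = paddle.to_tensor([[0], [1]], dtype='int64')   # index dtype differs from x
out = paddle.take_along_axis(x, idx, 1)             # gather along axis 1
assert np.allclose(out.numpy(), [[1.], [5.]])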
paddle/phi/kernels/norm_grad_kernel.h

@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void NormGradKernel(const Context& ctx,
                     const DenseTensor& x,
-                    const DenseTensor& out,
+                    const DenseTensor& norm,
                     const DenseTensor& out_grad,
                     int axis,
                     float epsilon,
python/paddle/fluid/layers/control_flow.py

@@ -18,7 +18,7 @@ from ..wrapped_decorator import signature_safe_contextmanager
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import assign, cast, fill_constant
 from .. import core
-from ..framework import Program, Variable, Operator, in_dygraph_mode, static_only
+from ..framework import Program, Variable, Operator, in_dygraph_mode, static_only, _in_eager_mode
 from ..layer_helper import LayerHelper, unique_name
 from .nn import logical_and, logical_not, logical_or
 from .utils import assert_same_structure, map_structure, hold_mutable_vars, copy_mutable_vars

@@ -3852,6 +3852,8 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_is_empty(x)
         return _C_ops.is_empty(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
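The same dispatch shape recurs across every Python layer touched below: in dygraph mode, route to the new final_state_* eager op when eager mode is active, otherwise fall back to the legacy tracer op. A minimal sketch with hypothetical callables standing in for the two ops:

# Illustrative only: legacy_op / final_state_op stand in for e.g.
# _C_ops.is_empty and _C_ops.final_state_is_empty.
def dispatch(x, in_dygraph_mode, in_eager_mode, legacy_op, final_state_op):
    if in_dygraph_mode:
        if in_eager_mode:
            return final_state_op(x)   # new eager (final-state) kernel path
        return legacy_op(x)            # old imperative tracer path
    raise NotImplementedError("static graph takes the append_op path")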
python/paddle/fluid/layers/layer_function_generator.py

@@ -258,6 +258,9 @@ def generate_activation_fn(op_type):
     def func(x, name=None):
         if in_dygraph_mode():
+            if _in_eager_mode():
+                op = getattr(_C_ops, "final_state_" + op_type)
+                return op(x)
             op = getattr(_C_ops, op_type)
             return op(x)
python/paddle/fluid/layers/loss.py

@@ -1458,6 +1458,10 @@ def sigmoid_cross_entropy_with_logits(x,
                                              ignore_index=-1, normalize=True)
             print(loss)
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+            x, label, normalize, ignore_index)
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'sigmoid_cross_entropy_with_logits')
python/paddle/fluid/layers/nn.py

@@ -11571,6 +11571,8 @@ def size(input):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_size(input)
         return _C_ops.size(input)
     check_variable_and_dtype(
         input, 'input',

@@ -12543,7 +12545,8 @@ def logical_not(x, out=None, name=None):
             res = paddle.logical_not(x)
             print(res) # [False True False True]
     """
+    if paddle.in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_logical_not(x)
     return _logical_op(
         op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)

@@ -13319,6 +13322,9 @@ def log_loss(input, label, epsilon=1e-4, name=None):
           prob = paddle.randn((10,1))
           cost = F.log_loss(input=prob, label=label)
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_log_loss(input, label, epsilon)
     helper = LayerHelper('log_loss', **locals())
     check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
     check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')

@@ -14335,6 +14341,8 @@ def where(condition):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_where_index(condition)
         return _C_ops.where_index(condition)

     helper = LayerHelper("where_index", **locals())

@@ -14829,8 +14837,8 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     if in_dygraph_mode():
         if _in_eager_mode():
-            return _C_op.final_state_unfold(x, kernel_sizes, strdides,
-                                            paddings, dilations)
+            return _C_ops.final_state_unfold(x, kernel_sizes, strides,
+                                             paddings, dilations)

     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(

@@ -15059,6 +15067,10 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
             print(shard_label)
             # [[-1], [1]]
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_shard_index(input, index_num, nshards,
+                                              shard_id, ignore_value)
     check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
     op_type = 'shard_index'
     helper = LayerHelper(op_type, **locals())
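After the eager-path fix in unfold above, a quick sanity sketch of the functional op it forwards to (shapes follow the standard im2col arithmetic):

import paddle
import paddle.nn.functional as F

# 1x1x4x4 input, 2x2 patches, stride 2 -> 4 patches, each flattened to C*kh*kw = 4
x = paddle.arange(16, dtype='float32').reshape([1, 1, 4, 4])
cols = F.unfold(x, kernel_sizes=[2, 2], strides=2)
print(cols.shape)  # [1, 4, 4]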
python/paddle/fluid/layers/ops.py

@@ -16,9 +16,10 @@ from __future__ import print_function
 import os
 from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code
 from .. import core
-from ..framework import convert_np_dtype_to_dtype_, Variable
+from ..framework import convert_np_dtype_to_dtype_, Variable, in_dygraph_mode, _in_eager_mode
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated
+from paddle import _C_ops

 __deprecated_func_name__ = {
     'tanh_shrink': 'tanhshrink',

@@ -794,6 +795,10 @@ _erf_ = generate_layer_fn('erf')

 def erf(x, name=None):
+    if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_erf(x)
+
     locals_var = locals().copy()
     kwargs = dict()
     for name, val in locals_var.items():
python/paddle/fluid/tests/unittests/op_test.py

@@ -545,7 +545,6 @@ class OpTest(unittest.TestCase):
                 v.value().get_tensor().set_recursive_sequence_lengths(lod)
             return v
         else:
-            print("init her")
             return paddle.to_tensor(value)

     def get_sequence_batch_size_1_input(self, lod=None, shape=None):

@@ -1502,14 +1501,14 @@ class OpTest(unittest.TestCase):
                     .recursive_sequence_lengths(), expect[1],
                     "Output (" + out_name + ") has different lod at " +
                     str(place) + " in eager dygraph mode")
         if check_eager:
-            with _test_eager_guard():
-                self.assertListEqual(
-                    eager_imperative_actual.value().get_tensor()
-                    .recursive_sequence_lengths(), expect[1],
-                    "Output (" + out_name + ") has different lod at " +
-                    str(place) + " in eager dygraph mode")
+            with fluid.dygraph.base.guard():
+                with _test_eager_guard():
+                    self.assertListEqual(
+                        eager_imperative_actual.value().get_tensor()
+                        .recursive_sequence_lengths(), expect[1],
+                        "Output (" + out_name + ") has different lod at " +
+                        str(place) + " in eager dygraph mode")

         # Note(zhiqiu): inplace_atol should be only set when op doesn't ensure
         # computational consistency.
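All of the test changes that follow rely on one harness idiom: _test_eager_guard() flips dygraph execution onto the eager final-state path for the duration of a with-block, so the same assertions run against both runtimes. A condensed sketch:

import paddle
from paddle.fluid.framework import _test_eager_guard

def run_twice(fn):
    fn()                       # legacy dygraph tracer
    with _test_eager_guard():
        fn()                   # eager final-state kernels

run_twice(lambda: paddle.ones([2]) + 1)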
python/paddle/fluid/tests/unittests/test_accuracy_op.py

@@ -20,6 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.framework import _test_eager_guard


 class TestAccuracyOp(OpTest):

@@ -49,7 +50,7 @@ class TestAccuracyOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)


 class TestAccuracyOpFp16(TestAccuracyOp):

@@ -57,7 +58,7 @@ class TestAccuracyOpFp16(TestAccuracyOp):
         self.dtype = np.float16

     def test_check_output(self):
-        self.check_output(atol=1e-3)
+        self.check_output(atol=1e-3, check_eager=True)


 class TestAccuracyOpError(unittest.TestCase):

@@ -127,6 +128,17 @@ class TestAccuracyAPI(unittest.TestCase):
         self.assertEqual((result.numpy() == expect_value).all(), True)

+        with _test_eager_guard():
+            predictions = paddle.to_tensor(
+                [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+                dtype='float32')
+            label = paddle.to_tensor([[2], [0]], dtype="int64")
+            result = paddle.metric.accuracy(input=predictions, label=label, k=1)
+            expect_value = np.array([0.5], dtype='float32')
+            self.assertEqual((result.numpy() == expect_value).all(), True)


 if __name__ == '__main__':
     paddle.enable_static()
python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -59,12 +59,17 @@ class TestActivation(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=(
+            hasattr(self, "python_api") and self.python_api != None))

     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(
+            ['X'],
+            'Out',
+            check_eager=(hasattr(self, "python_api") and
+                         self.python_api != None))

     def init_dtype(self):
         self.dtype = np.float64

@@ -356,6 +361,7 @@ class TestTanh(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "tanh"
         self.init_dtype()
+        self.python_api = paddle.tanh
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
         out = np.tanh(x)

@@ -366,7 +372,7 @@ class TestTanh(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)

     def init_dtype(self):
         #TODO If dtype is float64, the output (Out) has diff at CPUPlace

@@ -449,6 +455,7 @@ class TestAtan(TestActivation, TestParameter):
         self.op_type = "atan"
         self.init_dtype()
+        self.python_api = paddle.atan
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
         out = np.arctan(x)

@@ -459,7 +466,7 @@ class TestAtan(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)

     def test_out_name(self):
         with fluid.program_guard(fluid.Program()):

@@ -485,6 +492,7 @@ class TestSinh(TestActivation):
     def setUp(self):
         self.op_type = "sinh"
         self.init_dtype()
+        self.python_api = paddle.sinh
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)

@@ -496,7 +504,7 @@ class TestSinh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)

     def test_dygraph(self):
         with fluid.dygraph.guard():

@@ -557,6 +565,7 @@ class TestCosh(TestActivation):
     def setUp(self):
         self.op_type = "cosh"
         self.init_dtype()
+        self.python_api = paddle.cosh
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)

@@ -568,7 +577,7 @@ class TestCosh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)

     def test_dygraph(self):
         with fluid.dygraph.guard():

@@ -1082,6 +1091,7 @@ class TestCos(TestActivation):
     def setUp(self):
         self.op_type = "cos"
         self.init_dtype()
+        self.python_api = paddle.cos
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)

@@ -1093,7 +1103,7 @@ class TestCos(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestTan(TestActivation):

@@ -1151,6 +1161,7 @@ class TestAcos(TestActivation):
     def setUp(self):
         self.op_type = "acos"
         self.init_dtype()
+        self.python_api = paddle.acos
         np.random.seed(1024)
         x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)

@@ -1162,13 +1173,14 @@ class TestAcos(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSin(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "sin"
         self.init_dtype()
+        self.python_api = paddle.sin
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)

@@ -1180,13 +1192,14 @@ class TestSin(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestAsin(TestActivation):
     def setUp(self):
         self.op_type = "asin"
         self.init_dtype()
+        self.python_api = paddle.asin
         np.random.seed(2048)
         x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)

@@ -1198,13 +1211,14 @@ class TestAsin(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestAcosh(TestActivation):
     def setUp(self):
         self.op_type = "acosh"
         self.init_dtype()
+        self.python_api = paddle.acosh
         np.random.seed(1024)
         x = np.random.uniform(2, 3, [10, 12]).astype(self.dtype)

@@ -1216,13 +1230,14 @@ class TestAcosh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestAsinh(TestActivation):
     def setUp(self):
         self.op_type = "asinh"
         self.init_dtype()
+        self.python_api = paddle.asinh
         np.random.seed(1024)
         x = np.random.uniform(1, 2, [10, 12]).astype(self.dtype)

@@ -1234,13 +1249,14 @@ class TestAsinh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestAtanh(TestActivation):
     def setUp(self):
         self.op_type = "atanh"
         self.init_dtype()
+        self.python_api = paddle.atanh
         np.random.seed(400)
         x = np.random.uniform(-0.9, 0.9, [10, 12]).astype(self.dtype)

@@ -1252,7 +1268,7 @@ class TestAtanh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestRound(TestActivation):

@@ -3195,4 +3211,5 @@ def create_test_act_bf16_class(parent,
 create_test_act_bf16_class(TestRelu)

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
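check_eager is only switched on where a test wires up python_api, the functional entry point OpTest needs in order to rebuild the call in eager mode; tests without one keep the legacy path. The guard used above, condensed into a standalone sketch:

class FakeTest:
    python_api = None

def wants_eager(test) -> bool:
    # same condition the base class uses: a python_api must exist and be set
    return hasattr(test, "python_api") and test.python_api is not None

assert wants_eager(FakeTest()) is False
FakeTest.python_api = print   # any callable standing in for e.g. paddle.tanh
assert wants_eager(FakeTest()) is True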
python/paddle/fluid/tests/unittests/test_bernoulli_op.py

@@ -19,6 +19,7 @@ import paddle
 from op_test import OpTest
 import numpy as np
 import os
+from paddle.fluid.framework import _test_eager_guard


 def output_hist(out):

@@ -32,6 +33,7 @@ def output_hist(out):
 class TestBernoulliOp(OpTest):
     def setUp(self):
         self.op_type = "bernoulli"
+        self.python_api = paddle.bernoulli
         self.inputs = {"X": np.random.uniform(size=(1000, 784))}
         self.attrs = {}
         self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}

@@ -104,8 +106,28 @@ class TestRandomValue(unittest.TestCase):
         expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
         self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))

+        with _test_eager_guard():
+            x = paddle.to_tensor(x_np, dtype='float64')
+            y = paddle.bernoulli(x).numpy()
+            index0, index1, index2 = np.nonzero(y)
+            self.assertEqual(np.sum(index0), 260028995)
+            self.assertEqual(np.sum(index1), 8582429431)
+            self.assertEqual(np.sum(index2), 8581445798)
+            expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.]
+            self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+
+            x = paddle.to_tensor(x_np, dtype='float32')
+            y = paddle.bernoulli(x).numpy()
+            index0, index1, index2 = np.nonzero(y)
+            self.assertEqual(np.sum(index0), 260092343)
+            self.assertEqual(np.sum(index1), 8583509076)
+            self.assertEqual(np.sum(index2), 8582778540)
+            expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
+            self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+
         paddle.enable_static()


 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_bincount_op.py

@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 from op_test import OpTest

 paddle.enable_static()

@@ -62,6 +63,14 @@ class TestBincountOpAPI(unittest.TestCase):
                 (actual.numpy() == expected).all(),
                 msg='bincount output is wrong, out =' + str(actual.numpy()))

+            with _test_eager_guard():
+                inputs = fluid.dygraph.to_variable(inputs_np)
+                actual = paddle.bincount(inputs)
+                expected = np.bincount(inputs)
+                self.assertTrue(
+                    (actual.numpy() == expected).all(),
+                    msg='bincount output is wrong, out =' +
+                    str(actual.numpy()))


 class TestBincountOpError(unittest.TestCase):
     """Test bincount op error."""
python/paddle/fluid/tests/unittests/test_bitwise_op.py

@@ -289,7 +289,7 @@ class TestBitwiseNot(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
         pass
python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py

This file's single hunk (@@ -128,132 +128,135) deletes a large commented-out copy of the tests and restores them as live code; since the deleted comments duplicated the restored code line for line, only the restored form is shown:

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        self.check_grad(['Y'], 'Out', max_relative_error=0.01, check_eager=True)


# 3D(broadcast) + 3D, upper=True
class TestCholeskySolveOp3(TestCholeskySolveOp):
    """
    case 3
    """

    def config(self):
        self.y_shape = [1, 10, 10]
        self.x_shape = [2, 10, 5]
        self.upper = True
        self.dtype = np.float64


class TestCholeskySolveAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(2021)
        self.place = [paddle.CPUPlace()]
        # self.place = [paddle.CUDAPlace(0)]
        self.dtype = "float64"
        self.upper = True
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def check_static_result(self, place):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype)
            y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)

            x_np = np.random.random([10, 2]).astype(self.dtype)
            y_np = np.random.random([10, 10]).astype(self.dtype)
            if self.upper:
                umat = np.triu(y_np)
            else:
                umat = np.tril(y_np)
            z_np = cholesky_solution(umat, x_np, upper=self.upper)
            z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"x": x_np,
                                    "y": umat},
                              fetch_list=[z])
            self.assertTrue(np.allclose(fetches[0], z_np))

    def test_static(self):
        for place in self.place:
            self.check_static_result(place=place)

    def test_dygraph(self):
        def run(place):
            paddle.disable_static(place)
            x_np = np.random.random([20, 2]).astype(self.dtype)
            y_np = np.random.random([20, 20]).astype(self.dtype)
            z_np = scipy_cholesky_solution(y_np, x_np, upper=self.upper)

            x = paddle.to_tensor(x_np)
            y = paddle.to_tensor(y_np)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
            self.assertTrue(np.allclose(z_np, z.numpy()))
            self.assertEqual(z_np.shape, z.numpy().shape)
            paddle.enable_static()

        for idx, place in enumerate(self.place):
            run(place)

    def test_boardcast(self):
        def run(place):
            paddle.disable_static()
            x_np = np.random.random([1, 30, 2]).astype(self.dtype)
            y_np = np.random.random([2, 30, 30]).astype(self.dtype)
            nx_np = np.concatenate((x_np, x_np), axis=0)

            z_sci = scipy_cholesky_solution_batch(y_np, nx_np, upper=self.upper)

            x = paddle.to_tensor(x_np)
            y = paddle.to_tensor(y_np)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
            self.assertEqual(z_sci.shape, z.numpy().shape)
            self.assertTrue(np.allclose(z_sci, z.numpy()))

        for idx, place in enumerate(self.place):
            run(place)


class TestCholeskySolveOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of solve_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1)

            # The data type of input must be float32 or float64.
            x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
            y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2)

            x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
            y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3)

            x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
            y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4)

            # The number of dimensions of input'X must be >= 2.
            x5 = fluid.data(name="x5", shape=[30], dtype="float64")
            y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5)

            # The number of dimensions of input'Y must be >= 2.
            x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
            y6 = fluid.data(name="y6", shape=[30], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6)

            # The inner-most 2 dimensions of input'X should be equal to each other
            x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
            y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7)


if __name__ == "__main__":
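For context on what these tests assert: cholesky_solve(B, L) solves A @ X = B given a triangular Cholesky factor L of A. A reference computation with SciPy, analogous to the test's scipy_cholesky_solution helper (whose exact implementation is not shown in this diff):

import numpy as np
from scipy.linalg import cho_solve

rng = np.random.default_rng(0)
L = np.tril(rng.random((10, 10))) + 10 * np.eye(10)  # well-conditioned lower factor
B = rng.random((10, 2))
X = cho_solve((L, True), B)                          # True -> L is lower triangular
assert np.allclose((L @ L.T) @ X, B)                 # A = L L^T, so A @ X == B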
python/paddle/fluid/tests/unittests/test_cumsum_op.py

@@ -21,6 +21,7 @@ import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.framework import _test_eager_guard


 class TestCumsumOp(unittest.TestCase):

@@ -84,6 +85,9 @@ class TestCumsumOp(unittest.TestCase):
     def test_cpu(self):
         paddle.disable_static(paddle.fluid.CPUPlace())
         self.run_cases()
+        with _test_eager_guard():
+            self.run_cases()
         paddle.enable_static()

         self.run_static()

@@ -107,15 +111,16 @@ class TestCumsumOp(unittest.TestCase):
 class TestSumOp1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.python_api = paddle.cumsum
         self.attrs = {'axis': 2}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp2(OpTest):

@@ -306,4 +311,5 @@ class BadInputTest(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_eigh_op.py

@@ -25,6 +25,7 @@ class TestEighOp(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "eigh"
+        self.python_api = paddle.linalg.eigh
         self.init_input()
         self.init_config()
         np.random.seed(123)

@@ -42,10 +43,10 @@ class TestEighOp(OpTest):
         self.x_np = np.random.random(self.x_shape).astype(self.x_type)

     def test_check_output(self):
-        self.check_output(no_check_set=['Eigenvectors'])
+        self.check_output(no_check_set=['Eigenvectors'], check_eager=True)

     def test_grad(self):
-        self.check_grad(["X"], ["Eigenvalues"])
+        self.check_grad(["X"], ["Eigenvalues"], check_eager=True)


 class TestEighUPLOCase(TestEighOp):

@@ -207,4 +208,5 @@ class TestEighAPIError(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_erf_op.py

@@ -27,6 +27,7 @@ import paddle.fluid.dygraph as dg
 class TestErfOp(OpTest):
     def setUp(self):
         self.op_type = "erf"
+        self.python_api = paddle.erf
         self.dtype = self._init_dtype()
         self.x_shape = [11, 17]
         x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)

@@ -38,10 +39,10 @@ class TestErfOp(OpTest):
         return "float64"

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestErfLayer(unittest.TestCase):

@@ -67,4 +68,5 @@ class TestErfLayer(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_erfinv_op.py

@@ -21,13 +21,13 @@ from op_test import OpTest
 import paddle
 import paddle.fluid.core as core

-paddle.enable_static()
 np.random.seed(0)


 class TestErfinv(OpTest):
     def setUp(self):
         self.op_type = "erfinv"
+        self.python_api = paddle.erfinv
         self.init_dtype()
         self.shape = [11, 17]
         self.x = np.random.uniform(-1, 1, size=self.shape).astype(self.dtype)

@@ -42,14 +42,15 @@ class TestErfinv(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
         self.check_grad(
             ['X'],
             'Out',
             user_defined_grads=[self.gradient],
-            user_defined_grad_outputs=self.grad_out)
+            user_defined_grad_outputs=self.grad_out,
+            check_eager=True)


 class TestErfinvFP32(TestErfinv):

@@ -108,4 +109,5 @@ class TestErfinvAPI(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_increment.py

@@ -19,6 +19,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard


 class TestIncrement(unittest.TestCase):

@@ -39,6 +40,14 @@ class TestIncrement(unittest.TestCase):
             output = paddle.tensor.math.increment(input, value=1)
             self.assertEqual((output.numpy() == expected_result).all(), True)

+        with fluid.dygraph.guard():
+            with _test_eager_guard():
+                input = paddle.ones(shape=[1], dtype='int64')
+                expected_result = np.array([2], dtype='int64')
+                output = paddle.tensor.math.increment(input, value=1)
+                self.assertEqual(
+                    (output.numpy() == expected_result).all(), True)


 class TestInplaceApiWithDataTransform(unittest.TestCase):
     def test_increment(self):

@@ -55,4 +64,5 @@ class TestInplaceApiWithDataTransform(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_is_empty_op.py

@@ -23,11 +23,12 @@ import paddle
 class TestEmpty(OpTest):
     def setUp(self):
         self.op_type = "is_empty"
+        self.python_api = paddle.is_empty
         self.inputs = {'X': np.array([1, 2, 3])}
         self.outputs = {'Out': np.array([False])}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestNotEmpty(TestEmpty):

@@ -75,4 +76,5 @@ class TestIsEmptyOpDygraph(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_log_loss_op.py

@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle


 def sigmoid_array(x):

@@ -27,6 +28,7 @@ def sigmoid_array(x):
 class TestLogLossOp(OpTest):
     def setUp(self):
         self.op_type = 'log_loss'
+        self.python_api = paddle.nn.functional.log_loss
         samples_num = 100

         x = np.random.random((samples_num, 1)).astype("float32")

@@ -44,10 +46,11 @@ class TestLogLossOp(OpTest):
         self.outputs = {'Loss': loss}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['Predicted'], 'Loss', max_relative_error=0.03)
+        self.check_grad(
+            ['Predicted'], 'Loss', max_relative_error=0.03, check_eager=True)


 class TestLogLossOpError(unittest.TestCase):

@@ -80,4 +83,5 @@ class TestLogLossOpError(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_matrix_power_op.py

@@ -31,6 +31,7 @@ class TestMatrixPowerOp(OpTest):
     def setUp(self):
         self.op_type = "matrix_power"
         self.config()
+        self.python_api = paddle.linalg.matrix_power

         np.random.seed(123)
         mat = np.random.random(self.matrix_shape).astype(self.dtype)

@@ -41,11 +42,15 @@ class TestMatrixPowerOp(OpTest):
         self.attrs = {"n": self.n}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_grad(self):
         self.check_grad(
-            ["X"], "Out", numeric_grad_delta=1e-5, max_relative_error=1e-7)
+            ["X"],
+            "Out",
+            numeric_grad_delta=1e-5,
+            max_relative_error=1e-7,
+            check_eager=True)


 class TestMatrixPowerOpN1(TestMatrixPowerOp):
python/paddle/fluid/tests/unittests/test_multinomial_op.py

@@ -20,6 +20,7 @@ import paddle.fluid as fluid
 from paddle.fluid import core
 from op_test import OpTest
 import numpy as np
+from paddle.fluid.framework import _test_eager_guard


 def sample_output_one_dimension(out, dim):

@@ -46,6 +47,7 @@ class TestMultinomialOp(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "multinomial"
+        self.python_api = paddle.multinomial
         self.init_data()
         self.inputs = {"X": self.input_np}

@@ -113,6 +115,22 @@ class TestMultinomialApi(unittest.TestCase):
                 sample_prob, prob, rtol=0, atol=0.01),
             "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))

+    def test_eager(self):
+        # input probability is a vector, and replacement is True
+        paddle.disable_static()
+        with _test_eager_guard():
+            x_numpy = np.random.rand(4)
+            x = paddle.to_tensor(x_numpy)
+            out = paddle.multinomial(x, num_samples=100000, replacement=True)
+
+            sample_prob = sample_output_one_dimension(out.numpy(), 4)
+            prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
+            self.assertTrue(
+                np.allclose(
+                    sample_prob, prob, rtol=0, atol=0.01),
+                "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        paddle.enable_static()
+
     def test_dygraph2(self):
         # input probability is a matrix, and replacement is True
         paddle.disable_static()

@@ -128,6 +146,22 @@ class TestMultinomialApi(unittest.TestCase):
             "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
         paddle.enable_static()

+    def test_eager2(self):
+        # input probability is a matrix, and replacement is True
+        paddle.disable_static()
+        with _test_eager_guard():
+            x_numpy = np.random.rand(3, 4)
+            x = paddle.to_tensor(x_numpy)
+            out = paddle.multinomial(x, num_samples=100000, replacement=True)
+
+            sample_prob = sample_output_two_dimension(out.numpy(), [3, 4])
+            prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
+            self.assertTrue(
+                np.allclose(
+                    sample_prob, prob, rtol=0, atol=0.01),
+                "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        paddle.enable_static()
+
     def test_dygraph3(self):
         # replacement is False. number of samples must be less than number of categories.
         paddle.disable_static()

@@ -217,4 +251,5 @@ class TestMultinomialError(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
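The sample_output_* helpers reduce the drawn category ids to empirical frequencies, which the assertions then compare to the normalized input probabilities within atol=0.01. A hedged re-derivation of the one-dimensional case (the helper's exact code sits outside this diff):

import numpy as np

def empirical_freq(samples: np.ndarray, dim: int) -> np.ndarray:
    # count how often each category id was drawn, then normalize
    counts = np.bincount(samples.astype(np.int64).ravel(), minlength=dim)
    return counts / counts.sum()

rng = np.random.default_rng(0)
draws = rng.integers(0, 4, size=100000)        # uniform over 4 categories
assert np.allclose(empirical_freq(draws, 4), 0.25, atol=0.01)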
python/paddle/fluid/tests/unittests/test_segment_ops.py

@@ -93,6 +93,7 @@ class TestSegmentOps(OpTest):
         self.dtype = np.float64
         self.shape = [30, 15]
         self.attrs = {"pooltype": "SUM"}
+        self.python_api = paddle.incubate.segment_sum

     def setUp(self):
         self.prepare()

@@ -105,10 +106,10 @@ class TestSegmentOps(OpTest):
         self.outputs = {'Out': result.astype(self.dtype)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=False)


 class TestSegmentSum2(TestSegmentOps):

@@ -136,6 +137,7 @@ class TestSegmentMax(TestSegmentOps):
         super(TestSegmentMax, self).prepare()
         self.shape = [40, 20]
         self.attrs = {'pooltype': "MAX"}
+        # self.python_api = paddle.incubate.segment_max

     def setUp(self):
         self.prepare()

@@ -148,7 +150,8 @@ class TestSegmentMax(TestSegmentOps):
         self.outputs = {'Out': result.astype(self.dtype)}

     def test_check_grad(self):
-        self.check_grad(["X"], "Out", user_defined_grads=[self.gradient])
+        self.check_grad(
+            ["X"], "Out", user_defined_grads=[self.gradient], check_eager=False)


 class TestSegmentMax2(TestSegmentMax):

@@ -164,6 +167,7 @@ class TestSegmentMin(TestSegmentMax):
     def prepare(self):
         super(TestSegmentMin, self).prepare()
         self.attrs = {'pooltype': "MIN"}
+        #self.python_api = paddle.incubate.segment_min


 class TestSegmentMin2(TestSegmentMin):

@@ -180,6 +184,7 @@ class TestSegmentMean(TestSegmentOps):
         super(TestSegmentMean, self).prepare()
         self.shape = [40, 20]
         self.attrs = {'pooltype': "MEAN"}
+        #self.python_api = paddle.incubate.segment_mean

     def setUp(self):
         self.prepare()

@@ -199,6 +204,7 @@ class TestSegmentMean2(TestSegmentMean):
         self.dtype = np.float32
         self.shape = [30, 20]
         self.attrs = {'pooltype': "MEAN"}
+        #self.python_api = paddle.incubate.segment_mean


 class API_SegmentOpsTest(unittest.TestCase):

@@ -259,4 +265,5 @@ class API_SegmentOpsTest(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
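Note the pattern here: only segment_sum has a python_api wired up, so the eager checks stay off (check_eager=False) and the other pooltypes keep their python_api lines commented out. For reference, segment summation itself is just a scatter-add over rows; a NumPy sketch:

import numpy as np

def segment_sum(x: np.ndarray, segment_ids: np.ndarray) -> np.ndarray:
    out = np.zeros((segment_ids.max() + 1, x.shape[1]), dtype=x.dtype)
    np.add.at(out, segment_ids, x)   # scatter-add each row into its segment
    return out

x = np.array([[1., 2.], [3., 4.], [5., 6.]])
ids = np.array([0, 0, 1])
assert np.array_equal(segment_sum(x, ids), [[4., 6.], [5., 6.]])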
python/paddle/fluid/tests/unittests/test_selu_op.py

@@ -24,6 +24,7 @@ import paddle.fluid as fluid
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.framework import _test_eager_guard


 def ref_selu(x,

@@ -113,6 +114,15 @@ class TestSeluAPI(unittest.TestCase):
         out_ref = ref_selu(self.x_np, self.scale, self.alpha)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)

+        with _test_eager_guard():
+            x = paddle.to_tensor(self.x_np)
+            out1 = F.selu(x, self.scale, self.alpha)
+            selu = paddle.nn.SELU(self.scale, self.alpha)
+            out2 = selu(x)
+            out_ref = ref_selu(self.x_np, self.scale, self.alpha)
+            for r in [out1, out2]:
+                self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+
         paddle.enable_static()

     def test_fluid_api(self):

@@ -145,4 +155,5 @@ class TestSeluAPI(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_shard_index_op.py

@@ -22,10 +22,12 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.framework import Program, program_guard
+import paddle


 def common_setup(self, index_num, nshards, shard_id, ignore_value):
     self.op_type = 'shard_index'
+    self.python_api = paddle.shard_index
     x_lod = [[i for i in range(10)]]
     N = sum(x_lod[0])
     x = [np.random.randint(0, index_num - 1) for i in range(N)]

@@ -54,7 +56,7 @@ class TestShardIndexShardId0Op(OpTest):
         common_setup(self, 20, 2, 0, -1)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestShardIndexShardId1Op(OpTest):

@@ -62,7 +64,7 @@ class TestShardIndexShardId1Op(OpTest):
         common_setup(self, 20, 2, 1, -1)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestShardIndexIgnoreValueOp(OpTest):

@@ -70,7 +72,7 @@ class TestShardIndexIgnoreValueOp(OpTest):
         common_setup(self, 20, 2, 0, -2)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestShardIndexNotEvenlyDividedOp(OpTest):

@@ -78,8 +80,9 @@ class TestShardIndexNotEvenlyDividedOp(OpTest):
         common_setup(self, 15, 2, 1, -1)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py

@@ -22,6 +22,7 @@ import paddle.fluid.core as core
 import unittest
 from paddle.fluid import compiler, Program, program_guard
 import paddle.fluid as fluid
+import paddle


 class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):

@@ -30,6 +31,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = 64
         num_classes = 20
         self.inputs = {

@@ -49,10 +51,10 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):

@@ -61,6 +63,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = 64
         num_classes = 20
         ignore_index = -1

@@ -83,10 +86,10 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):

@@ -95,6 +98,7 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = 64
         num_classes = 20
         self.inputs = {

@@ -114,15 +118,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithNorm(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = 64
         num_classes = 20
         ignore_index = -1

@@ -145,10 +150,10 @@ class TestSigmoidCrossEntropyWithNorm(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):

@@ -157,6 +162,7 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = [10, 10]
         num_classes = 20
         self.inputs = {

@@ -176,15 +182,16 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithNorm2(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = [10, 10]
         num_classes = 20
         ignore_index = -1

@@ -207,10 +214,10 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):

@@ -219,6 +226,7 @@ class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits
         batch_size = [10, 10]
         num_classes = 20
         self.inputs = {

@@ -238,10 +246,10 @@ class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase):

@@ -271,4 +279,5 @@ class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_size_op.py
@@ -22,6 +22,7 @@ from op_test import OpTest
 class TestSizeOp(OpTest):
     def setUp(self):
         self.op_type = "size"
+        self.python_api = paddle.numel
         self.shape = []
         self.config()
         input = np.zeros(self.shape, dtype='bool')
@@ -32,7 +33,7 @@ class TestSizeOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestRank1Tensor(TestSizeOp):
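The pattern repeated across these test files is uniform: setUp() registers the operator's Python-level API in self.python_api, and check_output/check_grad gain a check_eager flag so the same numeric comparison also runs through the eager (final-state) execution path. A minimal sketch, assuming it is run from python/paddle/fluid/tests/unittests where op_test lives (the class name is hypothetical):

    import numpy as np
    import paddle
    from op_test import OpTest  # test harness shipped with the Paddle repo


    class TestSizeOpSketch(OpTest):  # hypothetical, mirrors TestSizeOp above
        def setUp(self):
            self.op_type = "size"
            self.python_api = paddle.numel  # called when check_eager=True
            input = np.zeros((2, 3), dtype='float64')
            self.inputs = {'Input': input}
            self.outputs = {'Out': np.array(np.size(input), dtype='int64')}

        def test_check_output(self):
            # runs the static-graph check and, in addition, re-executes
            # the op through self.python_api under eager mode
            self.check_output(check_eager=True)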
python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -85,10 +85,10 @@ class TestSoftmaxOp(OpTest):
                 place,
                 atol=1e-5,
                 check_dygraph=(self.use_mkldnn == False),
-                check_eager=False)
+                check_eager=True)
         else:
             self.check_output(
-                check_dygraph=(self.use_mkldnn == False), check_eager=False)
+                check_dygraph=(self.use_mkldnn == False), check_eager=True)

     def test_check_grad(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
python/paddle/fluid/tests/unittests/test_take_along_axis_op.py
@@ -29,6 +29,7 @@ class TestTakeAlongAxisOp(OpTest):
     def setUp(self):
         self.init_data()
         self.op_type = "take_along_axis"
+        self.python_api = paddle.take_along_axis
         self.xnp = np.random.random(self.x_shape).astype(self.x_type)
         self.target = np.take_along_axis(self.xnp, self.index, self.axis)
         broadcast_shape_list = list(self.x_shape)
@@ -43,10 +44,10 @@ class TestTakeAlongAxisOp(OpTest):
         self.outputs = {'Result': self.target}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['Input'], 'Result')
+        self.check_grad(['Input'], 'Result', check_eager=True)

     def init_data(self):
         self.x_type = "float64"
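For reference, the public API this test now exercises eagerly behaves like np.take_along_axis (the reference the test itself uses), with indices broadcast along the non-axis dimensions. A small usage sketch:

    import paddle

    x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    index = paddle.to_tensor([[0]])
    # index is broadcast over the non-axis dimension, so this gathers row 0
    print(paddle.take_along_axis(x, index, 0))  # [[1, 2, 3]]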
python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -27,6 +27,7 @@ class TestTileOpRank1(OpTest):
     def setUp(self):
         self.op_type = "tile"
         self.init_data()
+        self.python_api = paddle.tile
         self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'repeat_times': self.repeat_times}
@@ -38,10 +39,10 @@ class TestTileOpRank1(OpTest):
         self.repeat_times = [2]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 # with dimension expanding
@@ -85,6 +86,7 @@ class TestTileOpRank4(TestTileOpRank1):
 class TestTileOpRank1_tensor_attr(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.init_data()
         repeat_times_tensor = []
         for index, ele in enumerate(self.repeat_times):
@@ -160,6 +162,7 @@ class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
 class TestTileOpInteger(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {
             'X': np.random.randint(10, size=(4, 4, 5)).astype("int32")
@@ -169,26 +172,28 @@ class TestTileOpInteger(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 # Situation 5: input x is Bool
 class TestTileOpBoolean(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
         self.attrs = {'repeat_times': [2, 1, 4]}
         output = np.tile(self.inputs['X'], (2, 1, 4))
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 # Situation 6: input x is Integer
 class TestTileOpInt64_t(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {
             'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
@@ -198,7 +203,7 @@ class TestTileOpInt64_t(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestTileError(unittest.TestCase):
@@ -248,4 +253,5 @@ class TestTileAPI(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
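The eager path these tests now cover dispatches paddle.tile to the generated final-state op; the user-visible semantics are unchanged and match np.tile, which the tests use as the reference:

    import numpy as np
    import paddle

    data = paddle.to_tensor([1, 2, 3], dtype='int32')
    out = paddle.tile(data, repeat_times=[2, 1])
    print(out.numpy())                    # [[1 2 3], [1 2 3]]
    print(np.tile(data.numpy(), (2, 1)))  # identical reference result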
python/paddle/fluid/tests/unittests/test_trace_op.py
@@ -27,14 +27,15 @@ import paddle
 class TestTraceOp(OpTest):
     def setUp(self):
         self.op_type = "trace"
+        self.python_api = paddle.trace
         self.init_config()
         self.outputs = {'Out': self.target}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['Input'], 'Out')
+        self.check_grad(['Input'], 'Out', check_eager=True)

     def init_config(self):
         self.case = np.random.randn(20, 6).astype('float64')
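paddle.trace, now registered as the test's python_api, sums the requested diagonal; offset picks a diagonal above (positive) or below (negative) the main one:

    import paddle

    case = paddle.arange(6, dtype='float64').reshape([2, 3])
    # main diagonal: 0 + 4 = 5; offset=1 diagonal: 1 + 5 = 6
    print(paddle.trace(case).numpy())            # -> 5.0
    print(paddle.trace(case, offset=1).numpy())  # -> 6.0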
python/paddle/fluid/tests/unittests/test_unfold_op.py
@@ -95,6 +95,7 @@ class TestUnfoldOp(OpTest):
     def setUp(self):
         self.op_type = 'unfold'
         self.set_data()
+        self.python_api = paddle.nn.functional.unfold

     def test_check_output(self):
         self.check_output(check_eager=True)
python/paddle/fluid/tests/unittests/test_where_index.py
@@ -21,15 +21,17 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle


 class TestWhereIndexOp(OpTest):
     def setUp(self):
         self.op_type = "where_index"
+        self.python_api = paddle.fluid.layers.where
         self.init_config()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def init_config(self):
         self.inputs = {'Condition': np.array([True, False, True]), }
@@ -111,4 +113,5 @@ class TestWhereRaiseError(unittest.TestCase):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/incubate/tensor/math.py
@@ -15,6 +15,7 @@
 from paddle.fluid.layer_helper import LayerHelper, in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle import _C_ops
+from paddle.fluid.framework import _in_eager_mode

 __all__ = []
@@ -51,6 +52,8 @@ def segment_sum(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "SUM")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "SUM")
         return out
@@ -104,6 +107,9 @@ def segment_mean(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "MEAN")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MEAN")
         return out
@@ -156,6 +162,8 @@ def segment_min(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "MIN")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MIN")
         return out
@@ -208,6 +216,8 @@ def segment_max(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "MAX")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
         return out
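All four segment reductions share one kernel, segment_pool, selected by the pooltype string ("SUM"/"MEAN"/"MIN"/"MAX"); the eager branch indexes [0] because the pooled-ids tensor (the tmp above) is only an internal intermediate. Usage, following the segment_sum docstring:

    import paddle

    data = paddle.to_tensor([[1., 2., 3.], [3., 2., 1.], [4., 5., 6.]])
    segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
    # rows with the same segment id are reduced together
    out = paddle.incubate.segment_sum(data, segment_ids)
    print(out.numpy())  # [[4. 4. 4.], [4. 5. 6.]]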
python/paddle/metric/metrics.py
@@ -22,7 +22,7 @@ import numpy as np
 from ..fluid.data_feeder import check_variable_and_dtype
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
+from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, _in_eager_mode
 import paddle
 from paddle import _C_ops
@@ -798,7 +798,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
         total = _varbase_creator(dtype="int32")

         topk_out, topk_indices = paddle.topk(input, k=k)
-        if _in_eager_mode:
+        if _in_eager_mode():
            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
            return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
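Note what the one-character hunk above fixes: _in_eager_mode is a function, so the bare name was always truthy and the eager branch was taken unconditionally; adding the call parentheses restores the intended dispatch. Usage of the public API, per its docstring:

    import paddle

    predictions = paddle.to_tensor(
        [[0.2, 0.1, 0.4, 0.1, 0.2], [0.2, 0.3, 0.1, 0.15, 0.25]])
    label = paddle.to_tensor([[2], [0]], dtype='int64')
    # top-1 prediction is correct for the first sample only -> 0.5
    result = paddle.metric.accuracy(input=predictions, label=label, k=1)
    print(result.numpy())  # [0.5]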
python/paddle/nn/functional/activation.py
@@ -22,7 +22,7 @@ from ...tensor.math import multiply
 import warnings
 from ...fluid.layer_helper import LayerHelper
-from ...fluid.framework import convert_np_dtype_to_dtype_
+from ...fluid.framework import convert_np_dtype_to_dtype_, _in_eager_mode
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
 import paddle
 from paddle import _C_ops, in_dynamic_mode
@@ -783,6 +783,8 @@ def selu(x,
             "The alpha must be no less than zero. Received: {}.".format(alpha))

     if in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_selu(x, scale, alpha)
         return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
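F.selu computes scale * (max(0, x) + min(0, alpha * (exp(x) - 1))); for non-negative inputs this reduces to scale * x with the default scale of about 1.0507:

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
    # non-negative inputs are simply multiplied by scale
    print(F.selu(x).numpy())  # [[0. 1.0507], [2.1014 3.1521]]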
python/paddle/tensor/linalg.py
@@ -1439,6 +1439,8 @@ def bincount(x, weights=None, minlength=0, name=None):
         raise TypeError("Elements in Input(x) should all be integers")

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_bincount(x, weights, minlength)
         return _C_ops.bincount(x, weights, "minlength", minlength)

     helper = LayerHelper('bincount', **locals())
@@ -1748,6 +1750,8 @@ def matrix_power(x, n, name=None):
             # [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_matrix_power(x, n)
         return _C_ops.matrix_power(x, "n", n)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
@@ -2266,6 +2270,8 @@ def eigh(x, UPLO='L', name=None):
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_eigh(x, UPLO)
         return _C_ops.eigh(x, 'UPLO', UPLO)

     def __check_input(x, UPLO):
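paddle.bincount counts occurrences of each integer value from 0 up to max(x) (or minlength - 1, whichever is larger), and accumulates weights instead when they are given:

    import paddle

    x = paddle.to_tensor([1, 2, 1, 4, 5])
    print(paddle.bincount(x).numpy())             # [0 2 1 0 1 1]
    w = paddle.to_tensor([0.5, 0.5, 1.0, 0.25, 0.25])
    print(paddle.bincount(x, weights=w).numpy())  # [0. 1.5 0.5 0. 0.25 0.25]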
python/paddle/tensor/logic.py
@@ -453,8 +453,6 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
     if binary_op:
         return op(x, y)
     else:
-        if _in_eager_mode():
-            return _C_op.final_state_bitewise_not(x)
         return op(x)

     check_variable_and_dtype(
@@ -581,7 +579,8 @@ def bitwise_not(x, out=None, name=None):
             res = paddle.bitwise_not(x)
             print(res)  # [4, 0, -2]
     """
+    if _in_eager_mode() and out == None:
+        return _C_ops.final_state_bitwise_not(x)

     return _bitwise_op(
         op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False)
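The net effect of this hunk is to hoist the eager fast path out of the shared _bitwise_op helper into bitwise_not itself, restricting the shortcut to the out=None case, since the final-state op allocates its own result and cannot write into a preallocated out tensor. The operator itself is plain two's-complement NOT:

    import paddle

    x = paddle.to_tensor([-5, -1, 1])
    # bitwise NOT of two's-complement integers: ~n == -n - 1
    print(paddle.bitwise_not(x).numpy())  # [ 4  0 -2]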
python/paddle/tensor/manipulation.py
@@ -37,6 +37,7 @@ from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
 import paddle
 from paddle import _C_ops
 from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype
+from paddle.fluid.framework import _in_eager_mode

 __all__ = []
@@ -947,6 +948,9 @@ def split(x, num_or_sections, axis=0, name=None):
             print(out1.shape)  # [3, 3, 5]
             print(out2.shape)  # [3, 3, 5]
     """
     if paddle.in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_split(x, num_or_sections, axis)
         return paddle.fluid.layers.split(
             input=x, num_or_sections=num_or_sections, dim=axis, name=name)
@@ -1746,6 +1750,8 @@ def tile(x, repeat_times, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_tile(x, repeat_times)
         return _C_ops.tile(x, 'repeat_times', repeat_times)

     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
@@ -2822,6 +2828,8 @@ def take_along_axis(arr, indices, axis):
         broadcast_shape_list[axis] = list(arr.shape)[axis]
         broadcast_shape = tuple(broadcast_shape_list)
         arr = paddle.broadcast_to(arr, broadcast_shape)
+        if _in_eager_mode():
+            return _C_ops.final_state_take_along_axis(arr, indices, axis)
         return _C_ops.take_along_axis(arr, indices, 'Axis', axis)
     check_variable_and_dtype(
         arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
@@ -2887,6 +2895,9 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
         if broadcast_shape:
             indices = paddle.broadcast_to(indices, broadcast_shape)
         values = paddle.broadcast_to(values, indices.shape)
+        if _in_eager_mode():
+            return _C_ops.final_state_put_along_axis(arr, indices, values,
+                                                     axis, reduce)
         return _C_ops.put_along_axis(arr, indices, values, "Axis", axis,
                                      "Reduce", reduce)
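put_along_axis is the scatter counterpart of take_along_axis: values, broadcast to the indices' shape, are written into arr along axis. Following the API's documented example:

    import paddle

    x = paddle.to_tensor([[10, 30, 20], [60, 40, 50]])
    index = paddle.to_tensor([[0]])
    # the scalar 99 is broadcast and assigned along axis 0 at row 0
    result = paddle.put_along_axis(x, index, 99, 0)
    print(result.numpy())  # [[99 99 99], [60 40 50]]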
python/paddle/tensor/math.py
@@ -322,6 +322,8 @@ def subtract(x, y, name=None):
     axis = -1
     act = None
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_subtract(x, y)
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -2344,6 +2346,8 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     __check_input(input, offset, axis1, axis2)

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_trace(x, offset, axis1, axis2)
         return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     inputs = {'Input': [x]}
@@ -2566,6 +2570,8 @@ def cumsum(x, axis=None, dtype=None, name=None):
         x = cast(x, dtype)

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_cumsum(x, axis, flatten, False, False)
         if axis is None:
             return _C_ops.cumsum(x, 'flatten', flatten)
         else:
@@ -2816,6 +2822,8 @@ def sign(x, name=None):
           print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_sign(x)
         return _C_ops.sign(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
@@ -2897,6 +2905,8 @@ def increment(x, value=1.0, name=None):
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_increment(x, value)
         return _C_ops.increment(x, 'step', value)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
@@ -3430,6 +3440,8 @@ def erfinv(x, name=None):
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_erfinv(x)
         return _C_ops.erfinv(x)

     helper = LayerHelper('erfinv', **locals())
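In the cumsum eager branch, flatten is passed explicitly (axis=None sets it to True earlier in the function), and the two trailing False values correspond to the operator's remaining boolean attributes (exclusive and reverse). The user-facing behavior matches np.cumsum:

    import paddle

    data = paddle.arange(6).reshape([2, 3])
    # axis=None flattens first, like np.cumsum
    print(paddle.cumsum(data).numpy())          # [ 0  1  3  6 10 15]
    print(paddle.cumsum(data, axis=0).numpy())  # [[0 1 2], [3 5 7]]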
python/paddle/tensor/random.py
@@ -22,6 +22,7 @@ from ..fluid.layers import utils
 import paddle
 from paddle import _C_ops
 from paddle.static import Variable
+from paddle.fluid.framework import _in_eager_mode

 __all__ = []
@@ -67,6 +68,8 @@ def bernoulli(x, name=None):
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_bernoulli(x)
         return _C_ops.bernoulli(x)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
@@ -175,6 +178,8 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
         "multinomial op is not supported on ROCM yet.")

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_multinomial(x, num_samples, replacement)
         return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
                                   replacement)
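Both samplers take a tensor of probabilities (multinomial also accepts unnormalized non-negative weights); the printed values below vary with the seed:

    import paddle

    paddle.seed(2022)  # make the draws repeatable
    probs = paddle.to_tensor([0.1, 0.5, 0.9])
    # elementwise draw: 1 with probability p, otherwise 0
    print(paddle.bernoulli(probs).numpy())
    # four category indices drawn with replacement, weighted by probs
    print(paddle.multinomial(probs, num_samples=4, replacement=True).numpy())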
python/paddle/tensor/search.py
@@ -23,6 +23,7 @@ from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
 from paddle import _C_ops
 from .logic import logical_not
+from paddle.fluid.framework import _in_eager_mode

 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf  # DEFINE_ALIAS
@@ -170,6 +171,9 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_argmax(x, axis, keepdim, flatten,
+                                             var_dtype)
         out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
         return out
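With axis=None the input is flattened before the arg reduction; otherwise the reduction runs along the given axis:

    import paddle

    x = paddle.to_tensor([[5, 2, 9], [1, 8, 3]])
    print(paddle.argmax(x).numpy())          # 2  (index into the flattened tensor)
    print(paddle.argmax(x, axis=1).numpy())  # [2 1]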
python/paddle/utils/code_gen/api.yaml
@@ -267,18 +267,6 @@
     func : diagonal
   backward : diagonal_grad

-# softmax
-- api : softmax
-  args : (Tensor x, int axis)
-  output : Tensor
-  infer_meta :
-    func : SoftmaxInferMeta
-  kernel :
-    func : softmax
-  backward : softmax_grad

 # # maxout
 # - api : maxout
 #   args : (Tensor x, int groups, int axis)
@@ -298,6 +286,7 @@
     param : [index]
   kernel :
     func : put_along_axis
+    dtype : x
   backward : put_along_axis_grad
@@ -310,6 +299,7 @@
     param : [index]
   kernel :
     func : take_along_axis
+    dtype : x
   backward : take_along_axis_grad

 # matrix_power
@@ -342,6 +332,7 @@
   kernel :
     func : segment_pool
+  backward : segment_pool_grad

 # accuracy
 - api : accuracy
@@ -351,6 +342,7 @@
     func : AccuracyInferMeta
   kernel :
     func : accuracy
+    dtype : x

 # sin
 - api : sin
@@ -465,19 +457,9 @@
     func : atanh
   backward : atanh_grad

-# relu
-- api : relu
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : relu
-  backward : relu_grad

 # arg_min # int64 ???? dtype
-- api : arg_min
+- api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
   infer_meta :
@@ -486,7 +468,7 @@
     func : arg_min

 # arg_max # int64 ???? dtype
-- api : arg_max
+- api : argmax
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
   infer_meta :
python/paddle/utils/code_gen/backward.yaml
@@ -57,7 +57,7 @@
 # - backward_api : norm_grad
 #   forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
-#   args : (Tensor out_grad, Tensor x, Tensor norm, int axis, float epsilon, bool is_test)
+#   args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
 #   output : Tensor(x_grad)
 #   infer_meta :
 #     func : UnchangedInferMeta