Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle
提交
9cd5cd4e
P
Paddle
项目概览
PaddlePaddle
/
Paddle
大约 2 年 前同步成功
通知
2325
Star
20933
Fork
5424
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1423
列表
看板
标记
里程碑
合并请求
543
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1,423
Issue
1,423
列表
看板
标记
里程碑
合并请求
543
合并请求
543
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
9cd5cd4e
编写于
3月 15, 2022
作者:
P
phlrain
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update
上级
a67fc4be
变更
25
隐藏空白更改
内联
并排
Showing
25 changed file
with
617 addition
and
521 deletion
+617
-521
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
...er/auto_code_generator/final_state_generator/eager_gen.py
+8
-6
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
...auto_code_generator/final_state_generator/python_c_gen.py
+1
-0
paddle/phi/kernels/impl/trace_grad_kernel_impl.h
paddle/phi/kernels/impl/trace_grad_kernel_impl.h
+1
-1
paddle/phi/kernels/trace_grad_kernel.h
paddle/phi/kernels/trace_grad_kernel.h
+1
-1
paddle/phi/ops/compat/trace_sig.cc
paddle/phi/ops/compat/trace_sig.cc
+1
-1
python/paddle/fluid/layers/metric_op.py
python/paddle/fluid/layers/metric_op.py
+3
-0
python/paddle/fluid/layers/nn.py
python/paddle/fluid/layers/nn.py
+5
-0
python/paddle/fluid/tests/unittests/op_test.py
python/paddle/fluid/tests/unittests/op_test.py
+2
-1
python/paddle/fluid/tests/unittests/test_accuracy_op.py
python/paddle/fluid/tests/unittests/test_accuracy_op.py
+3
-1
python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
+8
-1
python/paddle/fluid/tests/unittests/test_argsort_op.py
python/paddle/fluid/tests/unittests/test_argsort_op.py
+12
-1
python/paddle/fluid/tests/unittests/test_bitwise_op.py
python/paddle/fluid/tests/unittests/test_bitwise_op.py
+3
-1
python/paddle/fluid/tests/unittests/test_cholesky_op.py
python/paddle/fluid/tests/unittests/test_cholesky_op.py
+2
-1
python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
...on/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
+129
-131
python/paddle/fluid/tests/unittests/test_softmax_op.py
python/paddle/fluid/tests/unittests/test_softmax_op.py
+7
-2
python/paddle/fluid/tests/unittests/test_unfold_op.py
python/paddle/fluid/tests/unittests/test_unfold_op.py
+13
-2
python/paddle/metric/metrics.py
python/paddle/metric/metrics.py
+4
-0
python/paddle/nn/functional/activation.py
python/paddle/nn/functional/activation.py
+4
-0
python/paddle/tensor/linalg.py
python/paddle/tensor/linalg.py
+5
-1
python/paddle/tensor/logic.py
python/paddle/tensor/logic.py
+3
-0
python/paddle/tensor/math.py
python/paddle/tensor/math.py
+2
-0
python/paddle/tensor/search.py
python/paddle/tensor/search.py
+5
-0
python/paddle/utils/code_gen/api.yaml
python/paddle/utils/code_gen/api.yaml
+248
-230
python/paddle/utils/code_gen/backward.yaml
python/paddle/utils/code_gen/backward.yaml
+143
-140
python/paddle/utils/code_gen/wrapped_infermeta_gen.py
python/paddle/utils/code_gen/wrapped_infermeta_gen.py
+4
-0
未找到文件。
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
浏览文件 @
9cd5cd4e
...
...
@@ -28,6 +28,7 @@ namespace = ""
yaml_types_mapping
=
{
'int'
:
'int'
,
'int32'
:
'int32_t'
,
'int64'
:
'int64_t'
,
'size_t'
:
'size_t'
,
\
'float'
:
'float'
,
'double'
:
'double'
,
'bool'
:
'bool'
,
\
'str'
:
'std::string'
,
\
'Backend'
:
'paddle::experimental::Backend'
,
'DataLayout'
:
'paddle::experimental::DataLayout'
,
'DataType'
:
'paddle::experimental::DataType'
,
\
'int64[]'
:
'std::vector<int64_t>'
,
'int[]'
:
'std::vector<int>'
,
'Tensor'
:
'Tensor'
,
...
...
@@ -235,7 +236,7 @@ def ParseYamlReturns(string):
else
:
ret_type
=
ret
.
strip
()
assert
ret_type
in
yaml_types_mapping
.
keys
()
assert
ret_type
in
yaml_types_mapping
.
keys
()
,
ret_type
ret_type
=
yaml_types_mapping
[
ret_type
]
assert
"Tensor"
in
ret_type
...
...
@@ -426,7 +427,7 @@ def SlotNameMatching(backward_inputs_list, backward_returns_list,
backward_input_type
,
False
,
backward_input_pos
]
else
:
assert
False
assert
False
,
backward_input_name
for
backward_output
in
backward_returns_list
:
backward_output_name
=
backward_output
[
0
]
...
...
@@ -435,7 +436,8 @@ def SlotNameMatching(backward_inputs_list, backward_returns_list,
backward_fwd_name
=
FindForwardName
(
backward_output_name
)
assert
backward_fwd_name
is
not
None
assert
backward_fwd_name
in
forward_inputs_position_map
.
keys
()
assert
backward_fwd_name
in
forward_inputs_position_map
.
keys
(
),
backward_fwd_name
matched_forward_input_type
=
forward_inputs_position_map
[
backward_fwd_name
][
0
]
...
...
@@ -684,10 +686,10 @@ def GenerateNodeCreationCodes(
else
:
# Tuple api_result
if
IsPlainTensorType
(
rtype
):
output_autograd_meta
=
f
" egr::AutogradMeta*
{
output_autograd_meta_name
}
= egr::EagerUtils::autograd_meta(&
api_result[
{
pos
}
]
);"
output_autograd_meta
=
f
" egr::AutogradMeta*
{
output_autograd_meta_name
}
= egr::EagerUtils::autograd_meta(&
std::get<
{
pos
}
>(api_result)
);"
else
:
assert
IsVectorTensorType
(
rtype
)
output_autograd_meta
=
f
" std::vector<egr::AutogradMeta*>
{
output_autograd_meta_vec_name
}
= egr::EagerUtils::autograd_meta(&
api_result[
{
pos
}
]
);
\n
"
output_autograd_meta
=
f
" std::vector<egr::AutogradMeta*>
{
output_autograd_meta_vec_name
}
= egr::EagerUtils::autograd_meta(&
std::get<
{
pos
}
>(api_result)
);
\n
"
output_autograd_meta
+=
f
" std::vector<egr::AutogradMeta*>*
{
output_autograd_meta_name
}
= &
{
output_autograd_meta_vec_name
}
;"
outputs_autograd_meta_list
.
append
(
output_autograd_meta
)
...
...
@@ -1198,7 +1200,7 @@ if __name__ == "__main__":
print
(
"Generated Backward Grad Output Map: "
,
backward_grad_output_map
)
# Backward Validation Check
# Backward Validation Check
BackwardValidationCheck
(
backward_fwd_input_map
,
backward_grad_input_map
,
backward_attrs_list
)
...
...
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
浏览文件 @
9cd5cd4e
...
...
@@ -22,6 +22,7 @@ atype_to_parsing_function = {
"bool"
:
"CastPyArg2Boolean"
,
"int"
:
"CastPyArg2Int"
,
"long"
:
"CastPyArg2Long"
,
"std::string"
:
"CastPyArgs2String"
,
"int64_t"
:
"CastPyArg2Long"
,
"float"
:
"CastPyArg2Float"
,
"string"
:
"CastPyArg2String"
,
...
...
paddle/phi/kernels/impl/trace_grad_kernel_impl.h
浏览文件 @
9cd5cd4e
...
...
@@ -82,8 +82,8 @@ struct TraceGradFunctor {
template
<
typename
T
,
typename
Context
>
void
TraceGradKernel
(
const
Context
&
ctx
,
const
DenseTensor
&
out_grad
,
const
DenseTensor
&
x
,
const
DenseTensor
&
out_grad
,
int
offset
,
int
axis1
,
int
axis2
,
...
...
paddle/phi/kernels/trace_grad_kernel.h
浏览文件 @
9cd5cd4e
...
...
@@ -20,8 +20,8 @@ namespace phi {
template
<
typename
T
,
typename
Context
>
void
TraceGradKernel
(
const
Context
&
ctx
,
const
DenseTensor
&
out_grad
,
const
DenseTensor
&
x
,
const
DenseTensor
&
out_grad
,
int
offset
,
int
axis1
,
int
axis2
,
...
...
paddle/phi/ops/compat/trace_sig.cc
浏览文件 @
9cd5cd4e
...
...
@@ -23,7 +23,7 @@ KernelSignature TraceOpArgumentMapping(const ArgumentMappingContext& ctx) {
KernelSignature
TraceGradOpArgumentMapping
(
const
ArgumentMappingContext
&
ctx
)
{
return
KernelSignature
(
"trace_grad"
,
{
GradVarName
(
"Out"
),
"Input"
},
{
"Input"
,
GradVarName
(
"Out"
)
},
{
"offset"
,
"axis1"
,
"axis2"
},
{
GradVarName
(
"Input"
)});
}
...
...
python/paddle/fluid/layers/metric_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -87,6 +87,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
_k
=
k
.
numpy
().
item
(
0
)
if
isinstance
(
k
,
Variable
)
else
k
topk_out
,
topk_indices
=
_C_ops
.
top_k_v2
(
input
,
'k'
,
_k
,
'sorted'
,
False
)
if
_in_eager_mode
:
_acc
=
_C_ops
.
final_state_accuracy
(
topk_out
,
topk_indices
,
label
)
return
_acc
_acc
,
_
,
_
=
_C_ops
.
accuracy
(
topk_out
,
topk_indices
,
label
,
correct
,
total
)
return
_acc
...
...
python/paddle/fluid/layers/nn.py
浏览文件 @
9cd5cd4e
...
...
@@ -14827,6 +14827,11 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
"Unexpected type of paddings, it should be either an integer or a list"
"of 2 or 4 integers")
if in_dygraph_mode():
if _in_eager_mode():
return _C_op.final_state_unfold(x, kernel_sizes, strdides, paddings,
dilations)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="unfold",
...
...
python/paddle/fluid/tests/unittests/op_test.py
浏览文件 @
9cd5cd4e
...
...
@@ -545,7 +545,8 @@ class OpTest(unittest.TestCase):
v
.
value
().
get_tensor
().
set_recursive_sequence_lengths
(
lod
)
return
v
else
:
return
fluid
.
dygraph
.
base
.
to_variable
(
value
)
print
(
"init her"
)
return
paddle
.
to_tensor
(
value
)
def
get_sequence_batch_size_1_input
(
self
,
lod
=
None
,
shape
=
None
):
"""Get LoD input data whose batch size is 1.
...
...
python/paddle/fluid/tests/unittests/test_accuracy_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -25,6 +25,7 @@ from paddle.fluid import compiler, Program, program_guard
class
TestAccuracyOp
(
OpTest
):
def
setUp
(
self
):
self
.
op_type
=
"accuracy"
self
.
python_api
=
paddle
.
metric
.
accuracy
self
.
dtype
=
np
.
float32
self
.
init_dtype
()
n
=
8192
...
...
@@ -48,7 +49,7 @@ class TestAccuracyOp(OpTest):
pass
def
test_check_output
(
self
):
self
.
check_output
()
self
.
check_output
(
check_eager
=
False
)
class
TestAccuracyOpFp16
(
TestAccuracyOp
):
...
...
@@ -128,4 +129,5 @@ class TestAccuracyAPI(unittest.TestCase):
if
__name__
==
'__main__'
:
paddle
.
enable_static
()
unittest
.
main
()
python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -26,6 +26,7 @@ from paddle.fluid import Program, program_guard
class
BaseTestCase
(
OpTest
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_min'
self
.
python_api
=
paddle
.
argmin
self
.
dims
=
(
3
,
4
,
5
)
self
.
dtype
=
'float32'
self
.
axis
=
0
...
...
@@ -41,12 +42,13 @@ class BaseTestCase(OpTest):
self
.
outputs
=
{
'Out'
:
np
.
argmax
(
self
.
x
,
axis
=
self
.
axis
)}
def
test_check_output
(
self
):
self
.
check_output
()
self
.
check_output
(
check_eager
=
True
)
class
TestCase0
(
BaseTestCase
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_max'
self
.
python_api
=
paddle
.
argmax
self
.
dims
=
(
3
,
4
,
5
)
self
.
dtype
=
'float32'
self
.
axis
=
0
...
...
@@ -63,6 +65,7 @@ class TestCase1(BaseTestCase):
class
TestCase2
(
BaseTestCase
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_max'
self
.
python_api
=
paddle
.
argmax
self
.
dims
=
(
3
,
4
)
self
.
dtype
=
'int64'
self
.
axis
=
0
...
...
@@ -71,6 +74,7 @@ class TestCase2(BaseTestCase):
class
TestCase2_1
(
BaseTestCase
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_max'
self
.
python_api
=
paddle
.
argmax
self
.
dims
=
(
3
,
4
)
self
.
dtype
=
'int64'
self
.
axis
=
-
1
...
...
@@ -95,6 +99,7 @@ class TestCase4(BaseTestCase):
class
TestCase3_
(
BaseTestCase
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_max'
self
.
python_api
=
paddle
.
argmax
self
.
dims
=
(
3
,
)
self
.
axis
=
0
...
...
@@ -152,6 +157,7 @@ class BaseTestComplex1_2(OpTest):
class
BaseTestComplex2_1
(
OpTest
):
def
initTestCase
(
self
):
self
.
op_type
=
'arg_max'
self
.
python_api
=
paddle
.
argmax
self
.
dims
=
(
4
,
5
,
6
)
self
.
dtype
=
'int32'
self
.
axis
=
2
...
...
@@ -202,4 +208,5 @@ class BaseTestComplex2_2(OpTest):
if
__name__
==
'__main__'
:
paddle
.
enable_static
()
unittest
.
main
()
python/paddle/fluid/tests/unittests/test_argsort_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -23,7 +23,7 @@ import six
import
paddle.fluid.core
as
core
from
paddle.fluid
import
ParamAttr
from
paddle.fluid.framework
import
Program
,
grad_var_name
from
paddle.fluid.framework
import
Program
,
grad_var_name
,
_test_eager_guard
from
paddle.fluid.executor
import
Executor
from
paddle.fluid.backward
import
append_backward
...
...
@@ -421,6 +421,16 @@ class TestArgsortImperative(unittest.TestCase):
expect2
=
np
.
argsort
(
-
self
.
input_data
,
axis
=
self
.
axis
)
self
.
assertEqual
((
expect2
==
out2
.
numpy
()).
all
(),
True
)
with
_test_eager_guard
():
var_x
=
paddle
.
to_tensor
(
self
.
input_data
)
out
=
paddle
.
argsort
(
var_x
,
axis
=
self
.
axis
)
expect
=
np
.
argsort
(
self
.
input_data
,
axis
=
self
.
axis
)
self
.
assertEqual
((
expect
==
out
.
numpy
()).
all
(),
True
)
out2
=
paddle
.
argsort
(
var_x
,
axis
=
self
.
axis
,
descending
=
True
)
expect2
=
np
.
argsort
(
-
self
.
input_data
,
axis
=
self
.
axis
)
self
.
assertEqual
((
expect2
==
out2
.
numpy
()).
all
(),
True
)
paddle
.
enable_static
()
...
...
@@ -443,4 +453,5 @@ class TestArgsortImperative4(TestArgsortImperative):
if
__name__
==
"__main__"
:
paddle
.
enable_static
()
unittest
.
main
()
python/paddle/fluid/tests/unittests/test_bitwise_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -276,6 +276,7 @@ class TestBitwiseXorBool(TestBitwiseXor):
class
TestBitwiseNot
(
OpTest
):
def
setUp
(
self
):
self
.
op_type
=
"bitwise_not"
self
.
python_api
=
paddle
.
bitwise_not
self
.
init_dtype
()
self
.
init_shape
()
self
.
init_bound
()
...
...
@@ -288,7 +289,7 @@ class TestBitwiseNot(OpTest):
self
.
outputs
=
{
'Out'
:
out
}
def
test_check_output
(
self
):
self
.
check_output
()
self
.
check_output
(
check_eager
=
False
)
def
test_check_grad
(
self
):
pass
...
...
@@ -351,4 +352,5 @@ class TestBitwiseNotBool(TestBitwiseNot):
if
__name__
==
"__main__"
:
paddle
.
enable_static
()
unittest
.
main
()
python/paddle/fluid/tests/unittests/test_cholesky_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -36,6 +36,7 @@ from decorator_helper import prog_scope
class
TestCholeskyOp
(
OpTest
):
def
setUp
(
self
):
self
.
op_type
=
"cholesky"
self
.
python_api
=
paddle
.
linalg
.
cholesky
self
.
_input_shape
=
(
2
,
32
,
32
)
self
.
_upper
=
True
self
.
init_config
()
...
...
@@ -54,7 +55,7 @@ class TestCholeskyOp(OpTest):
self
.
outputs
=
{
"Out"
:
output_data
}
def
test_check_output
(
self
):
self
.
check_output
()
self
.
check_output
(
check_eager
=
True
)
def
test_check_grad
(
self
):
places
=
[
fluid
.
CPUPlace
()]
...
...
python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -109,6 +109,7 @@ class TestCholeskySolveOp(OpTest):
def
setUp
(
self
):
self
.
op_type
=
"cholesky_solve"
self
.
python_api
=
paddle
.
linalg
.
cholesky_solve
self
.
config
()
if
self
.
upper
:
...
...
@@ -125,137 +126,134 @@ class TestCholeskySolveOp(OpTest):
self
.
outputs
=
{
'Out'
:
self
.
output
}
def
test_check_output
(
self
):
self
.
check_output
()
def
test_check_grad_normal
(
self
):
self
.
check_grad
([
'Y'
],
'Out'
,
max_relative_error
=
0.01
)
# 3D(broadcast) + 3D, upper=True
class
TestCholeskySolveOp3
(
TestCholeskySolveOp
):
"""
case 3
"""
def
config
(
self
):
self
.
y_shape
=
[
1
,
10
,
10
]
self
.
x_shape
=
[
2
,
10
,
5
]
self
.
upper
=
True
self
.
dtype
=
np
.
float64
class
TestCholeskySolveAPI
(
unittest
.
TestCase
):
def
setUp
(
self
):
np
.
random
.
seed
(
2021
)
self
.
place
=
[
paddle
.
CPUPlace
()]
# self.place = [paddle.CUDAPlace(0)]
self
.
dtype
=
"float64"
self
.
upper
=
True
if
core
.
is_compiled_with_cuda
():
self
.
place
.
append
(
paddle
.
CUDAPlace
(
0
))
def
check_static_result
(
self
,
place
):
paddle
.
enable_static
()
with
fluid
.
program_guard
(
fluid
.
Program
(),
fluid
.
Program
()):
x
=
fluid
.
data
(
name
=
"x"
,
shape
=
[
10
,
2
],
dtype
=
self
.
dtype
)
y
=
fluid
.
data
(
name
=
"y"
,
shape
=
[
10
,
10
],
dtype
=
self
.
dtype
)
z
=
paddle
.
linalg
.
cholesky_solve
(
x
,
y
,
upper
=
self
.
upper
)
x_np
=
np
.
random
.
random
([
10
,
2
]).
astype
(
self
.
dtype
)
y_np
=
np
.
random
.
random
([
10
,
10
]).
astype
(
self
.
dtype
)
if
self
.
upper
:
umat
=
np
.
triu
(
y_np
)
else
:
umat
=
np
.
tril
(
y_np
)
z_np
=
cholesky_solution
(
umat
,
x_np
,
upper
=
self
.
upper
)
z2_np
=
scipy_cholesky_solution
(
umat
,
x_np
,
upper
=
self
.
upper
)
exe
=
fluid
.
Executor
(
place
)
fetches
=
exe
.
run
(
fluid
.
default_main_program
(),
feed
=
{
"x"
:
x_np
,
"y"
:
umat
},
fetch_list
=
[
z
])
self
.
assertTrue
(
np
.
allclose
(
fetches
[
0
],
z_np
))
def
test_static
(
self
):
for
place
in
self
.
place
:
self
.
check_static_result
(
place
=
place
)
def
test_dygraph
(
self
):
def
run
(
place
):
paddle
.
disable_static
(
place
)
x_np
=
np
.
random
.
random
([
20
,
2
]).
astype
(
self
.
dtype
)
y_np
=
np
.
random
.
random
([
20
,
20
]).
astype
(
self
.
dtype
)
z_np
=
scipy_cholesky_solution
(
y_np
,
x_np
,
upper
=
self
.
upper
)
x
=
paddle
.
to_tensor
(
x_np
)
y
=
paddle
.
to_tensor
(
y_np
)
z
=
paddle
.
linalg
.
cholesky_solve
(
x
,
y
,
upper
=
self
.
upper
)
self
.
assertTrue
(
np
.
allclose
(
z_np
,
z
.
numpy
()))
self
.
assertEqual
(
z_np
.
shape
,
z
.
numpy
().
shape
)
paddle
.
enable_static
()
for
idx
,
place
in
enumerate
(
self
.
place
):
run
(
place
)
def
test_boardcast
(
self
):
def
run
(
place
):
paddle
.
disable_static
()
x_np
=
np
.
random
.
random
([
1
,
30
,
2
]).
astype
(
self
.
dtype
)
y_np
=
np
.
random
.
random
([
2
,
30
,
30
]).
astype
(
self
.
dtype
)
nx_np
=
np
.
concatenate
((
x_np
,
x_np
),
axis
=
0
)
z_sci
=
scipy_cholesky_solution_batch
(
y_np
,
nx_np
,
upper
=
self
.
upper
)
x
=
paddle
.
to_tensor
(
x_np
)
y
=
paddle
.
to_tensor
(
y_np
)
z
=
paddle
.
linalg
.
cholesky_solve
(
x
,
y
,
upper
=
self
.
upper
)
self
.
assertEqual
(
z_sci
.
shape
,
z
.
numpy
().
shape
)
self
.
assertTrue
(
np
.
allclose
(
z_sci
,
z
.
numpy
()))
for
idx
,
place
in
enumerate
(
self
.
place
):
run
(
place
)
class
TestCholeskySolveOpError
(
unittest
.
TestCase
):
def
test_errors
(
self
):
paddle
.
enable_static
()
with
program_guard
(
Program
(),
Program
()):
# The input type of solve_op must be Variable.
x1
=
fluid
.
create_lod_tensor
(
np
.
array
([[
-
1
]]),
[[
1
]],
fluid
.
CPUPlace
())
y1
=
fluid
.
create_lod_tensor
(
np
.
array
([[
-
1
]]),
[[
1
]],
fluid
.
CPUPlace
())
self
.
assertRaises
(
TypeError
,
paddle
.
linalg
.
cholesky_solve
,
x1
,
y1
)
# The data type of input must be float32 or float64.
x2
=
fluid
.
data
(
name
=
"x2"
,
shape
=
[
30
,
30
],
dtype
=
"bool"
)
y2
=
fluid
.
data
(
name
=
"y2"
,
shape
=
[
30
,
10
],
dtype
=
"bool"
)
self
.
assertRaises
(
TypeError
,
paddle
.
linalg
.
cholesky_solve
,
x2
,
y2
)
x3
=
fluid
.
data
(
name
=
"x3"
,
shape
=
[
30
,
30
],
dtype
=
"int32"
)
y3
=
fluid
.
data
(
name
=
"y3"
,
shape
=
[
30
,
10
],
dtype
=
"int32"
)
self
.
assertRaises
(
TypeError
,
paddle
.
linalg
.
cholesky_solve
,
x3
,
y3
)
x4
=
fluid
.
data
(
name
=
"x4"
,
shape
=
[
30
,
30
],
dtype
=
"float16"
)
y4
=
fluid
.
data
(
name
=
"y4"
,
shape
=
[
30
,
10
],
dtype
=
"float16"
)
self
.
assertRaises
(
TypeError
,
paddle
.
linalg
.
cholesky_solve
,
x4
,
y4
)
# The number of dimensions of input'X must be >= 2.
x5
=
fluid
.
data
(
name
=
"x5"
,
shape
=
[
30
],
dtype
=
"float64"
)
y5
=
fluid
.
data
(
name
=
"y5"
,
shape
=
[
30
,
30
],
dtype
=
"float64"
)
self
.
assertRaises
(
ValueError
,
paddle
.
linalg
.
cholesky_solve
,
x5
,
y5
)
# The number of dimensions of input'Y must be >= 2.
x6
=
fluid
.
data
(
name
=
"x6"
,
shape
=
[
30
,
30
],
dtype
=
"float64"
)
y6
=
fluid
.
data
(
name
=
"y6"
,
shape
=
[
30
],
dtype
=
"float64"
)
self
.
assertRaises
(
ValueError
,
paddle
.
linalg
.
cholesky_solve
,
x6
,
y6
)
# The inner-most 2 dimensions of input'X should be equal to each other
x7
=
fluid
.
data
(
name
=
"x7"
,
shape
=
[
2
,
3
,
4
],
dtype
=
"float64"
)
y7
=
fluid
.
data
(
name
=
"y7"
,
shape
=
[
2
,
4
,
3
],
dtype
=
"float64"
)
self
.
assertRaises
(
ValueError
,
paddle
.
linalg
.
cholesky_solve
,
x7
,
y7
)
self
.
check_output
(
check_eager
=
True
)
# def test_check_grad_normal(self):
# self.check_grad(['Y'], 'Out', max_relative_error=0.01, check_eager=True)
# # 3D(broadcast) + 3D, upper=True
# class TestCholeskySolveOp3(TestCholeskySolveOp):
# """
# case 3
# """
# def config(self):
# self.y_shape = [1, 10, 10]
# self.x_shape = [2, 10, 5]
# self.upper = True
# self.dtype = np.float64
# class TestCholeskySolveAPI(unittest.TestCase):
# def setUp(self):
# np.random.seed(2021)
# self.place = [paddle.CPUPlace()]
# # self.place = [paddle.CUDAPlace(0)]
# self.dtype = "float64"
# self.upper = True
# if core.is_compiled_with_cuda():
# self.place.append(paddle.CUDAPlace(0))
# def check_static_result(self, place):
# paddle.enable_static()
# with fluid.program_guard(fluid.Program(), fluid.Program()):
# x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype)
# y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype)
# z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
# x_np = np.random.random([10, 2]).astype(self.dtype)
# y_np = np.random.random([10, 10]).astype(self.dtype)
# if self.upper:
# umat = np.triu(y_np)
# else:
# umat = np.tril(y_np)
# z_np = cholesky_solution(umat, x_np, upper=self.upper)
# z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper)
# exe = fluid.Executor(place)
# fetches = exe.run(fluid.default_main_program(),
# feed={"x": x_np,
# "y": umat},
# fetch_list=[z])
# self.assertTrue(np.allclose(fetches[0], z_np))
# def test_static(self):
# for place in self.place:
# self.check_static_result(place=place)
# def test_dygraph(self):
# def run(place):
# paddle.disable_static(place)
# x_np = np.random.random([20, 2]).astype(self.dtype)
# y_np = np.random.random([20, 20]).astype(self.dtype)
# z_np = scipy_cholesky_solution(y_np, x_np, upper=self.upper)
# x = paddle.to_tensor(x_np)
# y = paddle.to_tensor(y_np)
# z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
# self.assertTrue(np.allclose(z_np, z.numpy()))
# self.assertEqual(z_np.shape, z.numpy().shape)
# paddle.enable_static()
# for idx, place in enumerate(self.place):
# run(place)
# def test_boardcast(self):
# def run(place):
# paddle.disable_static()
# x_np = np.random.random([1, 30, 2]).astype(self.dtype)
# y_np = np.random.random([2, 30, 30]).astype(self.dtype)
# nx_np = np.concatenate((x_np, x_np), axis=0)
# z_sci = scipy_cholesky_solution_batch(y_np, nx_np, upper=self.upper)
# x = paddle.to_tensor(x_np)
# y = paddle.to_tensor(y_np)
# z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
# self.assertEqual(z_sci.shape, z.numpy().shape)
# self.assertTrue(np.allclose(z_sci, z.numpy()))
# for idx, place in enumerate(self.place):
# run(place)
# class TestCholeskySolveOpError(unittest.TestCase):
# def test_errors(self):
# paddle.enable_static()
# with program_guard(Program(), Program()):
# # The input type of solve_op must be Variable.
# x1 = fluid.create_lod_tensor(
# np.array([[-1]]), [[1]], fluid.CPUPlace())
# y1 = fluid.create_lod_tensor(
# np.array([[-1]]), [[1]], fluid.CPUPlace())
# self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1)
# # The data type of input must be float32 or float64.
# x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
# y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
# self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2)
# x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
# y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
# self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3)
# x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
# y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
# self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4)
# # The number of dimensions of input'X must be >= 2.
# x5 = fluid.data(name="x5", shape=[30], dtype="float64")
# y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
# self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5)
# # The number of dimensions of input'Y must be >= 2.
# x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
# y6 = fluid.data(name="y6", shape=[30], dtype="float64")
# self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6)
# # The inner-most 2 dimensions of input'X should be equal to each other
# x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
# y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
# self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7)
if
__name__
==
"__main__"
:
...
...
python/paddle/fluid/tests/unittests/test_softmax_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -53,6 +53,7 @@ class TestSoftmaxOp(OpTest):
def
setUp
(
self
):
self
.
op_type
=
"softmax"
self
.
python_api
=
paddle
.
nn
.
functional
.
softmax
self
.
use_cudnn
=
False
self
.
use_mkldnn
=
False
# explicilty use float32 for ROCm, as MIOpen does not yet support float64
...
...
@@ -81,9 +82,13 @@ class TestSoftmaxOp(OpTest):
if
self
.
use_cudnn
:
place
=
core
.
CUDAPlace
(
0
)
self
.
check_output_with_place
(
place
,
atol
=
1e-5
,
check_dygraph
=
(
self
.
use_mkldnn
==
False
))
place
,
atol
=
1e-5
,
check_dygraph
=
(
self
.
use_mkldnn
==
False
),
check_eager
=
False
)
else
:
self
.
check_output
(
check_dygraph
=
(
self
.
use_mkldnn
==
False
))
self
.
check_output
(
check_dygraph
=
(
self
.
use_mkldnn
==
False
),
check_eager
=
False
)
def
test_check_grad
(
self
):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
...
...
python/paddle/fluid/tests/unittests/test_unfold_op.py
浏览文件 @
9cd5cd4e
...
...
@@ -21,6 +21,7 @@ from op_test import OpTest
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid
import
core
from
paddle.fluid.framework
import
_test_eager_guard
class
TestUnfoldOp
(
OpTest
):
...
...
@@ -37,6 +38,7 @@ class TestUnfoldOp(OpTest):
self
.
strides
=
[
1
,
1
]
self
.
paddings
=
[
1
,
1
,
1
,
1
]
self
.
dilations
=
[
1
,
1
]
self
.
python_api
=
paddle
.
nn
.
functional
.
unfold
input_shape
=
[
self
.
batch_size
,
self
.
input_channels
,
self
.
input_height
,
self
.
input_width
...
...
@@ -95,10 +97,10 @@ class TestUnfoldOp(OpTest):
self
.
set_data
()
def
test_check_output
(
self
):
self
.
check_output
()
self
.
check_output
(
check_eager
=
True
)
def
test_check_grad
(
self
):
self
.
check_grad
([
'X'
],
'Y'
)
self
.
check_grad
([
'X'
],
'Y'
,
check_eager
=
True
)
class
TestUnfoldAPI
(
TestUnfoldOp
):
...
...
@@ -122,9 +124,18 @@ class TestUnfoldAPI(TestUnfoldOp):
result
=
m
(
input
)
self
.
assertTrue
(
np
.
allclose
(
result
.
numpy
(),
self
.
outputs
[
'Y'
]))
with
_test_eager_guard
():
input
=
fluid
.
dygraph
.
to_variable
(
self
.
inputs
[
'X'
])
m
=
paddle
.
nn
.
Unfold
(
**
self
.
attrs
)
m
.
eval
()
result
=
m
(
input
)
self
.
assertTrue
(
np
.
allclose
(
result
.
numpy
(),
self
.
outputs
[
'Y'
]))
def
test_info
(
self
):
str
(
paddle
.
nn
.
Unfold
(
**
self
.
attrs
))
if
__name__
==
'__main__'
:
paddle
.
enable_static
()
unittest
.
main
()
python/paddle/metric/metrics.py
浏览文件 @
9cd5cd4e
...
...
@@ -798,8 +798,12 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
total
=
_varbase_creator
(
dtype
=
"int32"
)
topk_out
,
topk_indices
=
paddle
.
topk
(
input
,
k
=
k
)
if
_in_eager_mode
:
_acc
=
_C_ops
.
final_state_accuracy
(
topk_out
,
topk_indices
,
label
)
return
_acc
_acc
,
_
,
_
=
_C_ops
.
accuracy
(
topk_out
,
topk_indices
,
label
,
correct
,
total
)
return
_acc
helper
=
LayerHelper
(
"accuracy"
,
**
locals
())
...
...
python/paddle/nn/functional/activation.py
浏览文件 @
9cd5cd4e
...
...
@@ -561,6 +561,8 @@ def relu(x, name=None):
"""
if
in_dynamic_mode
():
if
_in_eager_mode
():
return
_C_ops
.
final_state_relu
(
x
)
return
_C_ops
.
relu
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'relu'
)
...
...
@@ -954,6 +956,8 @@ def softmax(x, axis=-1, dtype=None, name=None):
if
in_dynamic_mode
():
outs_cast
=
x
if
dtype
is
None
\
else
_C_ops
.
cast
(
x
,
'in_dtype'
,
x
.
dtype
,
'out_dtype'
,
dtype
)
if
_in_eager_mode
():
return
_C_ops
.
final_state_softmax
(
outs_cast
,
axis
)
return
_C_ops
.
softmax
(
outs_cast
,
'axis'
,
axis
,
'use_cudnn'
,
use_cudnn
)
if
dtype
is
None
:
...
...
python/paddle/tensor/linalg.py
浏览文件 @
9cd5cd4e
...
...
@@ -14,7 +14,7 @@
import
numpy
as
np
from
..fluid.layer_helper
import
LayerHelper
from
..framework
import
_varbase_creator
,
_dygraph_tracer
from
..framework
import
_varbase_creator
,
_dygraph_tracer
,
_in_eager_mode
from
..fluid.data_feeder
import
check_variable_and_dtype
,
check_type
,
check_dtype
from
..static
import
Variable
...
...
@@ -1205,6 +1205,8 @@ def cholesky(x, upper=False, name=None):
"""
if
paddle
.
in_dynamic_mode
():
if
_in_eager_mode
():
return
_C_ops
.
final_state_cholesky
(
x
,
upper
)
return
_C_ops
.
cholesky
(
x
,
"upper"
,
upper
)
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'cholesky'
)
check_type
(
upper
,
'upper'
,
bool
,
'cholesky'
)
...
...
@@ -2734,6 +2736,8 @@ def cholesky_solve(x, y, upper=False, name=None):
# [-2.5, -7, 9.5]
"""
if
paddle
.
in_dynamic_mode
():
if
_in_eager_mode
():
return
_C_ops
.
final_state_cholesky_solve
(
x
,
y
,
upper
)
return
_C_ops
.
cholesky_solve
(
x
,
y
,
'upper'
,
upper
)
helper
=
LayerHelper
(
"cholesky_solve"
,
**
locals
())
...
...
python/paddle/tensor/logic.py
浏览文件 @
9cd5cd4e
...
...
@@ -17,6 +17,7 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype
from
..fluid.layers.layer_function_generator
import
templatedoc
from
..static
import
Variable
from
..framework
import
VarBase
as
Tensor
from
paddle.fluid.framework
import
_in_eager_mode
# TODO: define logic functions of a tensor
from
..fluid.layers
import
is_empty
# noqa: F401
...
...
@@ -452,6 +453,8 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
if
binary_op
:
return
op
(
x
,
y
)
else
:
if
_in_eager_mode
():
return
_C_op
.
final_state_bitewise_not
(
x
)
return
op
(
x
)
check_variable_and_dtype
(
...
...
python/paddle/tensor/math.py
浏览文件 @
9cd5cd4e
...
...
@@ -2853,6 +2853,8 @@ def tanh(x, name=None):
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
if
paddle
.
in_dynamic_mode
():
if
_in_eager_mode
():
return
_C_ops
.
final_state_tanh
(
x
)
return
_C_ops
.
tanh
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'tanh'
)
...
...
python/paddle/tensor/search.py
浏览文件 @
9cd5cd4e
...
...
@@ -91,6 +91,8 @@ def argsort(x, axis=-1, descending=False, name=None):
# [0 2 1 1]]]
"""
if
paddle
.
in_dynamic_mode
():
if
_in_eager_mode
():
_
,
ids
,
=
_C_ops
.
final_state_argsort
(
x
,
axis
,
descending
)
_
,
ids
=
_C_ops
.
argsort
(
x
,
'axis'
,
axis
,
'descending'
,
descending
)
return
ids
check_variable_and_dtype
(
...
...
@@ -245,6 +247,9 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
axis
=
0
if
paddle
.
in_dynamic_mode
():
if
_in_eager_mode
():
out
=
_C_ops
.
final_state_arg_min
(
x
,
axis
,
keepdim
,
flattern
,
var_dtype
)
out
=
_C_ops
.
arg_min
(
x
,
'axis'
,
axis
,
'dtype'
,
var_dtype
,
'keepdims'
,
keepdim
,
'flatten'
,
flatten
)
return
out
...
...
python/paddle/utils/code_gen/api.yaml
浏览文件 @
9cd5cd4e
...
...
@@ -262,56 +262,59 @@
# func : maxout
# backward : maxout_grad
# # put_along_axis
# - api : put_along_axis
# args : (Tensor x, Tensor index, Tensor value, int axis, string reduce)
# output : Tensor
# infer_meta :
# func : PutAlongAxisInferMeta
# kernel :
# func : put_along_axis
# backward : put_along_axis_grad
# put_along_axis
-
api
:
put_along_axis
args
:
(Tensor x, Tensor index, Tensor value, int axis, str reduce)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
index
]
kernel
:
func
:
put_along_axis
backward
:
put_along_axis_grad
# # take_along_axis
# - api : take_along_axis
# args : (Tensor x, Tensor index, int axis)
# output : Tensor
# infer_meta :
# func : TakeAlongAxisInferMeta
# kernel :
# func : take_along_axis
# backward : take_along_axis_grad
# take_along_axis
-
api
:
take_along_axis
args
:
(Tensor x, Tensor index, int axis)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
index
]
kernel
:
func
:
take_along_axis
backward
:
take_along_axis_grad
# # matrix_power
# - api : maxtrix_power
# args : (Tensor x, int n)
# output : Tensor
# infer_meta :
# func : MaxtrixPowerInferMeta
# kernel :
# func : maxtrix_power
# backward : maxtrix_power_grad
# matrix_power
-
api
:
matrix_power
args
:
(Tensor x, int n)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
matrix_power
backward
:
matrix_power_grad
#
#
eigh
#
- api : eigh
# args : (Tensor x, string
uplo)
#
output : Tensor(out_w), Tensor(out_v)
#
infer_meta :
#
func : EighInferMeta
#
kernel :
#
func : eigh
#
backward : eigh_grad
# eigh
-
api
:
eigh
args
:
(Tensor x, str
uplo)
output
:
Tensor(out_w), Tensor(out_v)
infer_meta
:
func
:
EighInferMeta
kernel
:
func
:
eigh
backward
:
eigh_grad
#
#
segment_pool
#
- api : segment_pool
# args : (Tensor x, Tensor segment_ids, string
pooltype)
#
output : Tensor(out), Tensor(summed_ids)
#
infer_meta :
#
func : SegmentPoolInferMeta
#
kernel :
#
func : segment_pool
#
backward : segment_pool_grad
# segment_pool
-
api
:
segment_pool
args
:
(Tensor x, Tensor segment_ids, str
pooltype)
output
:
Tensor(out), Tensor(summed_ids)
infer_meta
:
func
:
SegmentPoolInferMeta
kernel
:
func
:
segment_pool
backward
:
segment_pool_grad
# accuracy
-
api
:
accuracy
...
...
@@ -447,44 +450,45 @@
# arg_min # int64 ???? dtype
-
api
:
a
gr
min
args
:
(Tensor x, int axis, bool keepdims, bool flatten, int dtype)
-
api
:
a
rg_
min
args
:
(Tensor x, int
64
axis, bool keepdims, bool flatten, int dtype)
output
:
Tensor
infer_meta
:
func
:
ArgMinMaxInferMeta
kernel
:
func
:
argmin
func
:
arg
_
min
# arg_max # int64 ???? dtype
-
api
:
a
gr
max
args
:
(Tensor x, int axis, bool keepdims, bool flatten, int dtype)
-
api
:
a
rg_
max
args
:
(Tensor x, int
64
axis, bool keepdims, bool flatten, int dtype)
output
:
Tensor
infer_meta
:
func
:
ArgMinMaxInferMeta
kernel
:
func
:
argmax
func
:
arg
_
max
# argsort
; kernel why input?
# argsort
-
api
:
argsort
args
:
(Tensor x, int axis, bool descending)
output
:
Tensor(out), Tensor(indices)
infer_meta
:
func
:
ArgsortInferMeta
func
:
ArgsortInferMeta
kernel
:
func
:
argsort
backward
:
argsort_grad
# auc
; kernel why input?
#
- api : auc
#
args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
#
output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
#
infer_meta :
#
func : AucInferMeta
#
kernel :
#
func : auc
# auc
-
api
:
auc
args
:
(Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
output
:
Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
infer_meta
:
func
:
AucInferMeta
kernel
:
func
:
auc
# # batch_norm
# - api : batch_norm
# args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str
ing
data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
# args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
# output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
# infer_meta :
# func : XXXXInferMeta
...
...
@@ -492,14 +496,14 @@
# func : batch_norm
# backward: batch_norm_grad
#
#
bernoulli
#
- api : bernoulli
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
# func : Bernoulli
InferMeta
#
kernel :
#
func : bernoulli
# bernoulli
-
api
:
bernoulli
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
Unchanged
InferMeta
kernel
:
func
:
bernoulli
# # bilinear_tensor_product ?? optional
# - api : bilinear_tensor_product
...
...
@@ -510,15 +514,18 @@
# kernel :
# func : bilinear_tensor_product
# backward : bilinear_tensor_product_grad
# optional : bias
# bincount
-
api
:
bincount
args
:
(Tensor x, Tensor weight, int minlength)
output
:
Tensor
infer_meta
:
func
:
BincountInferMeta
kernel
:
func
:
bincount
optional
:
weight
# bincount ?? optional
# - api : bincount
# args : (Tensor x, Tensor weight, int minlength)
# output : Tensor
# infer_meta :
# func : BincountInferMeta
# kernel :
# func : bincount
# bitwise_and
...
...
@@ -604,7 +611,7 @@
# # depthwise_conv2d
# # dropout ?? optional, intermediate
# - api : dropout
# args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str
ing
mode, int seed, bool fix_seed)
# args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
# output : Tensor(out), Tensor(mask)
# infer_meta :
# func : DropoutInferMeta
...
...
@@ -613,27 +620,27 @@
# # embedding
#
#
erf
#
- api : erf
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
# func : Erf
InferMeta
#
kernel :
#
func : erf
#
backward : erf_grad
# erf
-
api
:
erf
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
Unchanged
InferMeta
kernel
:
func
:
erf
backward
:
erf_grad
#
#
erfinv
#
- api : erfinv
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
# func : Erfinv
InferMeta
#
kernel :
#
func : erfinv
#
backward : erfinv_grad
# erfinv
-
api
:
erfinv
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
Unchanged
InferMeta
kernel
:
func
:
erfinv
backward
:
erfinv_grad
#
# expand_as ?? optional
#
expand_as
# - api : expand_as
# args : (Tensor x, Tensor y, int[] shape)
# output : Tensor
...
...
@@ -642,6 +649,7 @@
# kernel :
# func : expand_as
# backward : expand_as_grad
# optional : y
# # expand
...
...
@@ -655,13 +663,13 @@
# backward : expand_grad
# eye
-
api
:
eye
args
:
(int64 num_rows, int64 num_colums, DataType dtype
)
output
:
Tensor
infer_meta
:
func
:
EyeInferMeta
kernel
:
func
:
eye
#
- api : eye
# args : (int64 num_rows, int64 num_colums, DataType dtype = DataType::FLOAT32
)
#
output : Tensor
#
infer_meta :
#
func : EyeInferMeta
#
kernel :
#
func : eye
# # flip
# - api : flip
...
...
@@ -674,16 +682,18 @@
# gaussian_random
# - api : gaussian_random
# args : (Scala
yArray shape, float mean, float std, int seed, DataType dtype
)
# args : (Scala
rArray shape, float mean, float std, int seed, DataType dtype=DataType::FLOAT32
)
# output : Tensor
# infer_meta :
# func : GaussianRandomInferMeta
# func : CreateInferMeta
# param : [shape, dtype]
# kernel :
# func : gaussian_random
# data_type : dtype
# # graph_send_recv
# - api : graph_send_recv
# args : (Tensor x, Tensor src_index, Tensor dst_index, str
ing
pool_type)
# args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type)
# output : Tensor(out), Tensor(dst_count)
# infer_meta :
# func : GraphSendRecvInferMeta
...
...
@@ -693,30 +703,30 @@
# # histogram int64 ???
# - api : histogram
# args : (Tensor x, int64
_t
bins, int min, int max)
# args : (Tensor x, int64 bins, int min, int max)
# output : Tensor
# infer_meta :
# func : HistogramInferMeta
# kernel :
# func : histogram
#
#
increment
#
- api : increment
#
args : (Tensor x, float value)
#
output : Tensor
#
infer_meta :
#
func : IncrementInferMeta
#
kernel :
#
func : increment
# increment
-
api
:
increment
args
:
(Tensor x, float value)
output
:
Tensor
infer_meta
:
func
:
IncrementInferMeta
kernel
:
func
:
increment
#
#
is_empty
#
- api : is_empty
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
#
func : IsEmptyInferMeta
#
kernel :
#
func : is_empty
# is_empty
-
api
:
is_empty
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
IsEmptyInferMeta
kernel
:
func
:
is_empty
# # isinf selected rows??? involk
# - api : isinf
...
...
@@ -727,7 +737,7 @@
# kernel :
# func : isinf
# # isnan
# # isnan
selected rows??? involk
# - api : isnan
# args : (Tensor x)
# output : Tensor
...
...
@@ -736,7 +746,7 @@
# kernel :
# func : isnan
# # isfinite
# # isfinite
selected rows??? involk
# - api : isfinite
# args : (Tensor x)
# output : Tensor
...
...
@@ -745,34 +755,37 @@
# kernel :
# func : isfinite
#
#
label_smooth ?? optional
# label_smooth ?? optional
# - api : label_smooth
# args : (Tensor label, Tensor prior_dist, float epsilon)
# output : Tensor
# infer_meta :
# func : LabelSmoothInferMeta
# func : UnchangedInferMeta
# param : [label]
# kernel :
# func : label_smooth
# backward : label_smooth_grad
# optional : prior_dist
#
# linspace
#
linspace ???? start stop number,应该是sclar?
# - api : linspace
# args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
# args : (Tensor start, Tensor stop, Tensor number, DataType dtype
=DataType::FLOAT32
)
# output : Tensor
# infer_meta :
# func : LinspaceInferMeta
# kernel :
# func : linspace
# # log_loss
# - api : log_loss
# args : (Tensor input, Tensor label, float epsilon)
# output : Tensor
# infer_meta :
# func : LogLossInferMeta
# kernel :
# func : log_loss
# backward : log_loss
# log_loss
-
api
:
log_loss
args
:
(Tensor input, Tensor label, float epsilon)
output
:
Tensor
infer_meta
:
func
:
LogLossInferMeta
kernel
:
func
:
log_loss
backward
:
log_loss_grad
# # logical_and
# - api : logical_and
...
...
@@ -802,14 +815,14 @@
# kernel :
# func : logical_xor
#
#
logical_not
#
- api : logical_not
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
#
func : UnchangedInferMeta
#
kernel :
#
func : logical_not
# logical_not
-
api
:
logical_not
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
logical_not
# # masked_select
# - api : masked_select
...
...
@@ -831,24 +844,25 @@
# func : multi_dot
# backward : multi_dot_grad
#
#
multinomial
#
- api : multinomial
#
args : (Tensor x, int num_samples, bool replacement)
#
output : Tensor
#
infer_meta :
#
func : MultinomialInferMeta
#
kernel :
#
func : multinomial
# multinomial
-
api
:
multinomial
args
:
(Tensor x, int num_samples, bool replacement)
output
:
Tensor
infer_meta
:
func
:
MultinomialInferMeta
kernel
:
func
:
multinomial
#
#
nll_loss ?? optional
# nll_loss ?? optional
# - api : nll_loss
# args : (Tensor
input, Tensor label, Tensor weight, int64_t ignore_index, string
reduction)
# args : (Tensor
x, Tensor label, Tensor weight, int64 ignore_index, str
reduction)
# output : Tensor(out), Tensor(total_weight)
# infer_meta :
# func : NllLossInferMeta
# func : NllLoss
Raw
InferMeta
# kernel :
# func : nll_loss
# backward : nll_loss_grad
# optional : weight
# # pad
# - api : pad
...
...
@@ -862,7 +876,7 @@
# # pixel_shuffle
# - api : pixel_shuffle
# args : (Tensor x, int upscale_factor, str
ing
data_format)
# args : (Tensor x, int upscale_factor, str data_format)
# output : Tensor
# infer_meta :
# func : PixelShuffleInferMeta
...
...
@@ -880,7 +894,7 @@
# func : poisson
# backward : poisson_grad
#
#
psroi_pool ?? optional
# psroi_pool ?? optional
# - api : psroi_pool
# args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
# output : Tensor
...
...
@@ -889,6 +903,7 @@
# kernel :
# func : psroi_pool
# backward : psroi_pool_grad
# optional : rois_num
# # randint_raw
# - api : randint
...
...
@@ -926,57 +941,59 @@
# kernel :
# func : reduce_prod
# # selu
# - api : selu
# args : (Tensor x, float scale, float alpha)
# output : Tensor
# infer_meta :
# func : SeluInferMeta
# kernel :
# func : selu
# backward : selu_grad
# selu
-
api
:
selu
args
:
(Tensor x, float scale, float alpha)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
selu
backward
:
selu_grad
# # set_value None api
# # sgd # need invoke
# # shape ??? selcted rows
#
#
shard_index
#
- api : shard_index
#
args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
#
output : Tensor
#
infer_meta :
#
func : ShardIndexInferMeta
#
kernel :
#
func : shard_index
# shard_index
-
api
:
shard_index
args
:
(Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
output
:
Tensor
infer_meta
:
func
:
ShardIndexInferMeta
kernel
:
func
:
shard_index
# # sigmoid_cross_entropy_with_logits
# - api : sigmoid_cross_entropy_with_logits
# args : (Tensor x, Tensor label, bool normalize, int ignore_index)
# output : Tensor
# infer_meta :
# func : SigmoidCrossEntropyWithoLgitsInferMeta
# kernel :
# func : sigmoid_cross_entropy_with_logits
# sigmoid_cross_entropy_with_logits
-
api
:
sigmoid_cross_entropy_with_logits
args
:
(Tensor x, Tensor label, bool normalize, int ignore_index)
output
:
Tensor
infer_meta
:
func
:
SigmoidCrossEntropyWithLogitsInferMeta
kernel
:
func
:
sigmoid_cross_entropy_with_logits
backward
:
sigmoid_cross_entropy_with_logits_grad
#
#
size
#
- api : size
#
args : (Tensor x)
#
output : Tensor
#
infer_meta :
#
func : SizeInferMeta
#
kernel :
#
func : size
# size
-
api
:
size
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
SizeInferMeta
kernel
:
func
:
size
#
#
tile
#
- api : tile
#
args : (Tensor x, ScalarArray repeat_times)
#
output : Tensor
#
infer_meta :
#
func : TileInferMeta
#
kernel :
#
func : tile
#
backward : tile_grad
# tile
-
api
:
tile
args
:
(Tensor x, ScalarArray repeat_times)
output
:
Tensor
infer_meta
:
func
:
TileInferMeta
kernel
:
func
:
tile
backward
:
tile_grad
# # top_k
# - api : top_k
...
...
@@ -988,15 +1005,15 @@
# func : top_k
# backward : top_k_grad
#
#
trace
#
- api : trace
#
args : (Tensor x, int offset, int axis1, int axis2)
#
output : Tensor
#
infer_meta :
#
func : TraceInferMeta
#
kernel :
#
func : trace
#
backward : trace_grad
# trace
-
api
:
trace
args
:
(Tensor x, int offset, int axis1, int axis2)
output
:
Tensor
infer_meta
:
func
:
TraceInferMeta
kernel
:
func
:
trace
backward
:
trace_grad
# # phi_transfer_layout | not have python api
...
...
@@ -1018,18 +1035,19 @@
# kernel :
# func : unbind
#
#
unfold
#
- api : unfold
#
args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
#
output : Tensor
#
infer_meta :
#
func : UnfoldInferMeta
#
kernel :
#
func : unfold
#
backward : unfold_grad
# unfold
-
api
:
unfold
args
:
(Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output
:
Tensor
infer_meta
:
func
:
UnfoldInferMeta
kernel
:
func
:
unfold
backward
:
unfold_grad
# # uniform_random_raw selected rows ??
# # viterbi_decode
# viterbi_decode
# - api : viterbi_decode
# args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
# output : Tensor(scores), Tensor(path)
...
...
@@ -1038,14 +1056,14 @@
# kernel :
# func : viterbi_decode
#
#
where_index
#
- api : where_index
#
args : (Tensor condition)
#
output : Tensor
#
infer_meta :
#
func : WhereIndexInferMeta
#
kernel :
#
func : where_index
# where_index
-
api
:
where_index
args
:
(Tensor condition)
output
:
Tensor
infer_meta
:
func
:
WhereIndexInferMeta
kernel
:
func
:
where_index
# # yolo_box
...
...
python/paddle/utils/code_gen/backward.yaml
浏览文件 @
9cd5cd4e
...
...
@@ -112,55 +112,56 @@
# func : maxout_grad
# - backward_api : put_along_axis_grad
# forward : put_along_axis (Tensor x, Tensor index, Tensor value, int axis, string reduce) -> Tensor(out)
# args : (Tensor x, Tensor index, Tensor out_grad, int axis, string reduce)
# output : Tensor(x_grad), Tensor(value_grad)
# infer_meta :
# func : GeneralBinaryGradInferMeta
# param : [x, valule]
# kernel :
# func : put_along_axis_grad
# output is optional 如何处理
-
backward_api
:
put_along_axis_grad
forward
:
put_along_axis (Tensor x, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, int axis, str reduce)
output
:
Tensor(x_grad), Tensor(value_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
index
]
kernel
:
func
:
put_along_axis_grad
#
- backward_api : take_along_axis_grad
#
forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
#
args : (Tensor x, Tensor index, Tensor out_grad, int axis)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : take_along_axis_grad
-
backward_api
:
take_along_axis_grad
forward
:
take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, int axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
take_along_axis_grad
# - backward_api : max
trix_power_grad
# forward : max
trix_power (Tensor x, int n) -> Tensor(out)
#
args : (Tensor x, Tensor out, Tensor out_grad, int n)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
# func : max
trix_power_grad
-
backward_api
:
ma
trix_power_grad
forward
:
ma
trix_power (Tensor x, int n) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad, int n)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
ma
trix_power_grad
#
- backward_api : eigh_grad
# forward : eigh (Tensor x, string
uplo) -> Tensor(out_w), Tensor(out_v)
#
args : (Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [out_v]
#
kernel :
#
func : eigh_grad
-
backward_api
:
eigh_grad
forward
:
eigh (Tensor x, str
uplo) -> Tensor(out_w), Tensor(out_v)
args
:
(Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_v
]
kernel
:
func
:
eigh_grad
#
- backward_api : segment_pool_grad
# forward : segment_pool (Tensor x, Tensor segment_ids, string
pooltype) -> Tensor(out), Tensor(summed_ids)
# args : (Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tenosr out_grad, string
pooltype)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : segment_pool_grad
-
backward_api
:
segment_pool_grad
forward
:
segment_pool (Tensor x, Tensor segment_ids, str
pooltype) -> Tensor(out), Tensor(summed_ids)
args
:
(Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tensor out_grad, str
pooltype)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
segment_pool_grad
-
backward_api
:
cos_grad
forward
:
cos (Tensor x) -> Tensor(out)
...
...
@@ -282,19 +283,19 @@
kernel
:
func
:
relu_grad
#
- backward_api : argsort_grad
# forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), t
ensor(indices)
#
args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : argsort_grad
-
backward_api
:
argsort_grad
forward
:
argsort (Tensor x, int axis, bool descending) -> Tensor(out), T
ensor(indices)
args
:
(Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
argsort_grad
# - backward_api : batch_norm_grad
# forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str
ing
data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
# forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
# args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
# output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
# infer_meta :
...
...
@@ -312,6 +313,7 @@
# param : [x, y, weight, bias]
# kernel :
# func : bilinear_tensor_product_grad
# optional : bias
# - backward_api : broadcast_tensor_grad
# forward : broadcast_tensors (Tensor[] x) -> Tensor [] (out)
...
...
@@ -344,8 +346,8 @@
func
:
cholesky_solve_grad
# - backward_api : dropout_grad
# forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str
ing
mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
# args : (Tensor mask, Tensor out_grad, float p, bool is_test, str
ing
mode)
# forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
# args : (Tensor mask, Tensor out_grad, float p, bool is_test, str mode)
# output : Tensor(x_grad)
# infer_meta :
# func : UnchangedInferMeta
...
...
@@ -354,25 +356,25 @@
# func : dropout_grad
#
- backward_api : erf_grad
#
forward : erf (Tensor x) -> Tensor(out)
#
args : (Tensor x, Tensor out, Tensor out_grad)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : erf_grad
-
backward_api
:
erf_grad
forward
:
erf (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
erf_grad
#
- backward_api : erfinv_grad
#
forward : erf (Tensor x) -> Tensor(out)
#
args : (Tensor out, Tensor out_grad)
#
output : Tensor(x_grad)
#
infer_meta :
# func : ErfinvGra
dInferMeta
#
param : [out]
#
kernel :
#
func : erfinv_grad
-
backward_api
:
erfinv_grad
forward
:
erf (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
Unchange
dInferMeta
param
:
[
out
]
kernel
:
func
:
erfinv_grad
# - backward_api : expand_as_grad
# forward : expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
...
...
@@ -395,11 +397,11 @@
# func : expand_grad
# - backward_api : graph_send_recv_grad
# forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str
ing
pool_type) -> Tensor(out), Tensor(dst_count)
# args : (Tensor out_grad, Tensor x, Tensor out, Tensor src_index, Tensor dst_index, Tensor dst_count, str
ing
pool_type)
# forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type) -> Tensor(out), Tensor(dst_count)
# args : (Tensor out_grad, Tensor x, Tensor out, Tensor src_index, Tensor dst_index, Tensor dst_count, str pool_type)
# output : Tensor(x_grad)
# infer_meta :
# func : Unchanged
Grad
InferMeta
# func : UnchangedInferMeta
# param : [x]
# kernel :
# func : graph_send_recv_grad
...
...
@@ -407,22 +409,22 @@
# - backward_api : label_smooth_grad
# forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
# args : (Tensor out_grad, float epsilon)
# output : Tensor(
x
_grad)
# output : Tensor(
label
_grad)
# infer_meta :
# func :
XXXX
InferMeta
# param : [
x
]
# func :
Unchanged
InferMeta
# param : [
out_grad
]
# kernel :
# func : label_smooth_grad
#
- backward_api : log_loss_grad
#
forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
#
args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
#
output : Tensor(input_grad)
#
infer_meta :
#
func : UnchangedInferMeta
# param : [x
]
#
kernel :
#
func : log_loss_grad
-
backward_api
:
log_loss_grad
forward
:
log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
args
:
(Tensor input, Tensor label, Tensor out_grad, float epsilon)
output
:
Tensor(input_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
input
]
kernel
:
func
:
log_loss_grad
# - backward_api : masked_selecte_grad
# forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
...
...
@@ -445,8 +447,8 @@
# func : multi_dot_grad
# - backward_api : nll_loss_grad
# forward : nll_loss (Tensor
input, Tensor label, Tensor weight, int64_t ignore_index, string
reduction) -> Tensor(out), Tensor(total_weight)
# args : (Tensor x, Tensor label, Tensor total_weight, Tensor weight, Tensor out_grad, int64
_t ignore_index, string
reduction)
# forward : nll_loss (Tensor
x, Tensor label, Tensor weight, int64 ignore_index, str
reduction) -> Tensor(out), Tensor(total_weight)
# args : (Tensor x, Tensor label, Tensor total_weight, Tensor weight, Tensor out_grad, int64
ignore_index, str
reduction)
# output : Tensor[] (x_grad)
# infer_meta :
# func : UnchangedInferMeta
...
...
@@ -465,8 +467,8 @@
# func : pad_grad
# - backward_api : pixel_shuffle_grad
# forward : pixel_shuffle (Tensor x, int upscale_factor, str
ing
data_format) -> Tensor(out)
# args : (Tensor out_grad, int upscale_factor, str
ing
data_format)
# forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
# args : (Tensor out_grad, int upscale_factor, str data_format)
# output : Tensor(x_grad)
# infer_meta :
# func : XXXXXInferMeta
...
...
@@ -493,36 +495,37 @@
# param : [x]
# kernel :
# func : psroi_pool_grad
# optional : rois_num
#
- backward_api : selu_grad
#
forward : selu (Tensor x, float scale, float alpha) -> Tensor(out)
#
args : (Tensor out, Tensor out_grad, float scale, float alpha)
#
output : Tensor(x_grad)
#
infer_meta :
# func : XXXXX
InferMeta
# param : [x
]
#
kernel :
#
func : selu_grad
-
backward_api
:
selu_grad
forward
:
selu (Tensor x, float scale, float alpha) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, float scale, float alpha)
output
:
Tensor(x_grad)
infer_meta
:
func
:
Unchanged
InferMeta
param
:
[
out
]
kernel
:
func
:
selu_grad
#
- backward_api : sigmoid_cross_entropy_with_logits_grad
#
forward : sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
# args : (Tensor x, Tensor label, Tensor out_grad, bool normalize, int ing
ore_index)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : sigmoid_cross_entropy_with_logits_grad
-
backward_api
:
sigmoid_cross_entropy_with_logits_grad
forward
:
sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
args
:
(Tensor x, Tensor label, Tensor out_grad, bool normalize, int ign
ore_index)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
sigmoid_cross_entropy_with_logits_grad
#
- backward_api : tile_grad
#
forward : tile (Tensor x, ScalarArray repeat_times) -> Tensor(out)
#
args : (Tensor x, Tensor out_grad, ScalarArray repeat_times)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : tile_grad
-
backward_api
:
tile_grad
forward
:
tile (Tensor x, ScalarArray repeat_times) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, ScalarArray repeat_times)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
tile_grad
# # forward backward type not match
# - backward_api : top_k_grad
...
...
@@ -536,25 +539,25 @@
# func : top_k_grad
#
- backward_api : trace_grad
#
forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
# args : (Tensor out_grad, Tensor x
, int offset, int axis1, int axis2)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : trace_grad
-
backward_api
:
trace_grad
forward
:
trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad
, int offset, int axis1, int axis2)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
trace_grad
#
- backward_api : unfold_grad
#
forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
#
args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
#
output : Tensor(x_grad)
#
infer_meta :
#
func : UnchangedInferMeta
#
param : [x]
#
kernel :
#
func : unfold_grad
-
backward_api
:
unfold_grad
forward
:
unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
unfold_grad
# - backward_api : where_index_grad
# forward : where_index (Tensor condition) -> Tensor(out)
...
...
@@ -564,4 +567,4 @@
# func : UnchangedInferMeta
# param : [x]
# kernel :
# func :
trace
_grad
# func :
where_index
_grad
python/paddle/utils/code_gen/wrapped_infermeta_gen.py
浏览文件 @
9cd5cd4e
...
...
@@ -43,9 +43,12 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
'const std::vector<Tensor>&'
:
'const std::vector<MetaTensor>&'
,
'Tensor'
:
'MetaTensor*'
,
'std::vector<Tensor>'
:
'std::vector<MetaTensor>*'
,
'const paddle::optional<Tensor&>'
:
'const paddle::optional<MetaTensor&>'
}
wrapped_infermeta_name
=
get_wrapped_infermeta_name
(
api
.
api
)
print
(
wrapped_infermeta_name
)
args
=
[]
for
input_name
in
api
.
inputs
[
'names'
]:
if
input_name
in
kernel_params
:
...
...
@@ -132,6 +135,7 @@ def generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path,
for
api
in
apis
:
api_item
=
ForwardAPI
(
api
)
#print( str(api_item) )
declare_code
,
defind_code
,
register_code
=
gene_wrapped_infermeta_and_register
(
api_item
)
header_file
.
write
(
declare_code
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录