PaddlePaddle / Paddle
Commit 9cd5cd4e
Authored on March 15, 2022 by phlrain
Commit message: update
Parent commit: a67fc4be

Showing 25 changed files with 617 additions and 521 deletions (+617 / -521)
Changed files:

  paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py      +8    -6
  paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py   +1    -0
  paddle/phi/kernels/impl/trace_grad_kernel_impl.h                               +1    -1
  paddle/phi/kernels/trace_grad_kernel.h                                         +1    -1
  paddle/phi/ops/compat/trace_sig.cc                                             +1    -1
  python/paddle/fluid/layers/metric_op.py                                        +3    -0
  python/paddle/fluid/layers/nn.py                                               +5    -0
  python/paddle/fluid/tests/unittests/op_test.py                                 +2    -1
  python/paddle/fluid/tests/unittests/test_accuracy_op.py                        +3    -1
  python/paddle/fluid/tests/unittests/test_arg_min_max_op.py                     +8    -1
  python/paddle/fluid/tests/unittests/test_argsort_op.py                         +12   -1
  python/paddle/fluid/tests/unittests/test_bitwise_op.py                         +3    -1
  python/paddle/fluid/tests/unittests/test_cholesky_op.py                        +2    -1
  python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py                  +129  -131
  python/paddle/fluid/tests/unittests/test_softmax_op.py                         +7    -2
  python/paddle/fluid/tests/unittests/test_unfold_op.py                          +13   -2
  python/paddle/metric/metrics.py                                                +4    -0
  python/paddle/nn/functional/activation.py                                      +4    -0
  python/paddle/tensor/linalg.py                                                 +5    -1
  python/paddle/tensor/logic.py                                                  +3    -0
  python/paddle/tensor/math.py                                                   +2    -0
  python/paddle/tensor/search.py                                                 +5    -0
  python/paddle/utils/code_gen/api.yaml                                          +248  -230
  python/paddle/utils/code_gen/backward.yaml                                     +143  -140
  python/paddle/utils/code_gen/wrapped_infermeta_gen.py                          +4    -0
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py

```diff
@@ -28,6 +28,7 @@ namespace = ""
 yaml_types_mapping = {
     'int' : 'int', 'int32' : 'int32_t', 'int64' : 'int64_t',  'size_t' : 'size_t', \
     'float' : 'float', 'double' : 'double', 'bool' : 'bool', \
+    'str' : 'std::string', \
     'Backend' : 'paddle::experimental::Backend', 'DataLayout' : 'paddle::experimental::DataLayout', 'DataType' : 'paddle::experimental::DataType', \
     'int64[]' : 'std::vector<int64_t>', 'int[]' : 'std::vector<int>',
     'Tensor' : 'Tensor',
@@ -235,7 +236,7 @@ def ParseYamlReturns(string):
     else:
         ret_type = ret.strip()

-    assert ret_type in yaml_types_mapping.keys()
+    assert ret_type in yaml_types_mapping.keys(), ret_type
     ret_type = yaml_types_mapping[ret_type]

     assert "Tensor" in ret_type
@@ -426,7 +427,7 @@ def SlotNameMatching(backward_inputs_list, backward_returns_list,
                 backward_input_type, False, backward_input_pos
             ]
         else:
-            assert False
+            assert False, backward_input_name

     for backward_output in backward_returns_list:
         backward_output_name = backward_output[0]
@@ -435,7 +436,8 @@ def SlotNameMatching(backward_inputs_list, backward_returns_list,
         backward_fwd_name = FindForwardName(backward_output_name)
         assert backward_fwd_name is not None
-        assert backward_fwd_name in forward_inputs_position_map.keys()
+        assert backward_fwd_name in forward_inputs_position_map.keys(
+        ), backward_fwd_name

         matched_forward_input_type = forward_inputs_position_map[
             backward_fwd_name][0]
@@ -684,10 +686,10 @@ def GenerateNodeCreationCodes(
         else:
             # Tuple api_result
             if IsPlainTensorType(rtype):
-                output_autograd_meta = f"    egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&api_result[{pos}]);"
+                output_autograd_meta = f"    egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&std::get<{pos}>(api_result));"
             else:
                 assert IsVectorTensorType(rtype)
-                output_autograd_meta = f"    std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&api_result[{pos}]);\n"
+                output_autograd_meta = f"    std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&std::get<{pos}>(api_result));\n"
                 output_autograd_meta += f"    std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"

         outputs_autograd_meta_list.append(output_autograd_meta)
```
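This change does two things: it teaches the YAML-to-C++ type table about `str`, and it attaches the offending value to several bare asserts so a bad type name in the op YAML fails with a usable message. A minimal sketch of the lookup, assuming an abbreviated table (`parse_return_type` is a hypothetical stand-in for the generator's parsing code):

```python
# Abbreviated copy of the generator's type table; the full version lives in
# eager_gen.py. parse_return_type is hypothetical, for illustration only.
yaml_types_mapping = {
    'int': 'int', 'int64': 'int64_t',
    'str': 'std::string',   # the entry this commit adds
    'Tensor': 'Tensor',
}

def parse_return_type(ret: str) -> str:
    ret_type = ret.strip()
    # With the message attached, a typo in the YAML (say "Tesnor") is
    # reported by name instead of as a bare AssertionError.
    assert ret_type in yaml_types_mapping, ret_type
    return yaml_types_mapping[ret_type]

print(parse_return_type(' Tensor '))  # Tensor
print(parse_return_type('str'))       # std::string
```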
paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py

```diff
@@ -22,6 +22,7 @@ atype_to_parsing_function = {
     "bool": "CastPyArg2Boolean",
     "int": "CastPyArg2Int",
     "long": "CastPyArg2Long",
+    "std::string": "CastPyArgs2String",
     "int64_t": "CastPyArg2Long",
     "float": "CastPyArg2Float",
     "string": "CastPyArg2String",
```
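`atype_to_parsing_function` maps an argument's declared C++ type to the helper that converts the incoming `PyObject`. Worth noting: the new entry's value, `CastPyArgs2String`, is spelled differently from the `CastPyArg2String` already used for `string`. A sketch of how a generator might consume such a table (`emit_arg_parsing` is hypothetical; only the dictionary entries come from the diff):

```python
# Hypothetical emitter built on the parsing-function table: one generated
# C++ statement per Python-level argument.
atype_to_parsing_function = {
    "bool": "CastPyArg2Boolean",
    "int": "CastPyArg2Int",
    "std::string": "CastPyArgs2String",  # entry added by this commit
    "float": "CastPyArg2Float",
}

def emit_arg_parsing(name: str, atype: str, pos: int) -> str:
    parser = atype_to_parsing_function[atype]
    return f"auto {name} = {parser}(PyTuple_GET_ITEM(args, {pos}), {pos});"

print(emit_arg_parsing("upper", "bool", 0))
```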
paddle/phi/kernels/impl/trace_grad_kernel_impl.h

```diff
@@ -82,8 +82,8 @@ struct TraceGradFunctor {
 template <typename T, typename Context>
 void TraceGradKernel(const Context& ctx,
-                     const DenseTensor& out_grad,
                      const DenseTensor& x,
+                     const DenseTensor& out_grad,
                      int offset,
                      int axis1,
                      int axis2,
```
paddle/phi/kernels/trace_grad_kernel.h

```diff
@@ -20,8 +20,8 @@ namespace phi {
 template <typename T, typename Context>
 void TraceGradKernel(const Context& ctx,
-                     const DenseTensor& out_grad,
                      const DenseTensor& x,
+                     const DenseTensor& out_grad,
                      int offset,
                      int axis1,
                      int axis2,
```
paddle/phi/ops/compat/trace_sig.cc

```diff
@@ -23,7 +23,7 @@ KernelSignature TraceOpArgumentMapping(const ArgumentMappingContext& ctx) {
 KernelSignature TraceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature("trace_grad",
-                         {GradVarName("Out"), "Input"},
+                         {"Input", GradVarName("Out")},
                          {"offset", "axis1", "axis2"},
                          {GradVarName("Input")});
 }
```
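The three trace changes are one refactor: the phi grad kernel now takes `x` before `out_grad`, and the argument mapping in `trace_sig.cc` reorders the signature inputs to match, so the op's `Input` tensor still lines up with the kernel's `x` parameter. A quick dygraph sanity check of the gradient itself (a sketch; for a square input, the gradient of `trace(x)` with respect to `x` is the identity matrix):

```python
import paddle

x = paddle.randn([4, 4])
x.stop_gradient = False
out = paddle.trace(x, offset=0, axis1=0, axis2=1)
out.backward()
# trace only reads the diagonal, so the gradient is the identity matrix.
print(x.grad)
```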
python/paddle/fluid/layers/metric_op.py

```diff
@@ -87,6 +87,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
         _k = k.numpy().item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _C_ops.top_k_v2(input, 'k', _k, 'sorted',
                                                  False)
+        if _in_eager_mode:
+            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+            return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
         return _acc
```
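Unlike the `_in_eager_mode()` calls added elsewhere in this commit, this branch tests the bare name `_in_eager_mode`; if that name is a function, the condition is always truthy and the eager path is always taken. A runnable toy version of the dispatch pattern the branch appears to intend (the lambdas stand in for the `_C_ops` entry points; everything here is illustrative):

```python
# Toy model of the eager/legacy dispatch. The real code routes between
# _C_ops.final_state_accuracy and _C_ops.accuracy.
_in_eager_mode = lambda: True
final_state_accuracy = lambda out, idx, label: ("eager", out)
legacy_accuracy = lambda out, idx, label, c, t: (("legacy", out), None, None)

def accuracy_dispatch(topk_out, topk_indices, label, correct=None, total=None):
    if _in_eager_mode():  # note the call; the committed line tests the bare name
        return final_state_accuracy(topk_out, topk_indices, label)
    _acc, _, _ = legacy_accuracy(topk_out, topk_indices, label, correct, total)
    return _acc

print(accuracy_dispatch([0.9], [1], [1]))  # ('eager', [0.9])
```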
python/paddle/fluid/layers/nn.py

```diff
@@ -14827,6 +14827,11 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
             "Unexpected type of paddings, it should be either an integer or a list"
             "of 2 or 4 integers")

+    if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_op.final_state_unfold(x, kernel_sizes, strdides, paddings,
+                                            dilations)
+
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="unfold",
```
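The committed branch references `_C_op` and `strdides`, which look like typos for `_C_ops` and `strides` and would raise a `NameError` if this path ever executed. For reference, the public API the branch forwards to behaves as follows (a sketch):

```python
import paddle

x = paddle.randn([2, 3, 8, 8])
# Extract 3x3 patches; with padding 1 and stride 1 the output length is
# 8*8 = 64 and each column holds C*k*k = 3*3*3 = 27 values.
patches = paddle.nn.functional.unfold(
    x, kernel_sizes=[3, 3], strides=1, paddings=1, dilations=1)
print(patches.shape)  # [2, 27, 64]
```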
python/paddle/fluid/tests/unittests/op_test.py

```diff
@@ -545,7 +545,8 @@ class OpTest(unittest.TestCase):
                     v.value().get_tensor().set_recursive_sequence_lengths(lod)
                 return v
             else:
-                return fluid.dygraph.base.to_variable(value)
+                print("init her")
+                return paddle.to_tensor(value)

     def get_sequence_batch_size_1_input(self, lod=None, shape=None):
         """Get LoD input data whose batch size is 1.
```
python/paddle/fluid/tests/unittests/test_accuracy_op.py

```diff
@@ -25,6 +25,7 @@ from paddle.fluid import compiler, Program, program_guard
 class TestAccuracyOp(OpTest):
     def setUp(self):
         self.op_type = "accuracy"
+        self.python_api = paddle.metric.accuracy
         self.dtype = np.float32
         self.init_dtype()
         n = 8192
@@ -48,7 +49,7 @@ class TestAccuracyOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)


 class TestAccuracyOpFp16(TestAccuracyOp):
@@ -128,4 +129,5 @@ class TestAccuracyAPI(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
```
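The pattern repeated across the test files below is: record the matching Python API on the test (`self.python_api = ...`) and pass `check_eager=` to `check_output`, so `OpTest` can also run the new eager final-state kernel and compare it with the legacy operator. A minimal sketch of a test written this way (runnable only inside Paddle's unittest directory, where `op_test` is importable; what `check_eager` does internally is assumed from the diff):

```python
import numpy as np
import paddle
from op_test import OpTest  # available in python/paddle/fluid/tests/unittests

class TestArgMaxEager(OpTest):
    def setUp(self):
        self.op_type = 'arg_max'
        self.python_api = paddle.argmax   # Python API matching op_type
        x = np.random.rand(3, 4).astype('float32')
        self.inputs = {'X': x}
        self.attrs = {'axis': 0}
        self.outputs = {'Out': np.argmax(x, axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)  # also exercise the eager kernel
```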
python/paddle/fluid/tests/unittests/test_arg_min_max_op.py

```diff
@@ -26,6 +26,7 @@ from paddle.fluid import Program, program_guard
 class BaseTestCase(OpTest):
     def initTestCase(self):
         self.op_type = 'arg_min'
+        self.python_api = paddle.argmin
         self.dims = (3, 4, 5)
         self.dtype = 'float32'
         self.axis = 0
@@ -41,12 +42,13 @@ class BaseTestCase(OpTest):
         self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestCase0(BaseTestCase):
     def initTestCase(self):
         self.op_type = 'arg_max'
+        self.python_api = paddle.argmax
         self.dims = (3, 4, 5)
         self.dtype = 'float32'
         self.axis = 0
@@ -63,6 +65,7 @@ class TestCase1(BaseTestCase):
 class TestCase2(BaseTestCase):
     def initTestCase(self):
         self.op_type = 'arg_max'
+        self.python_api = paddle.argmax
         self.dims = (3, 4)
         self.dtype = 'int64'
         self.axis = 0
@@ -71,6 +74,7 @@ class TestCase2(BaseTestCase):
 class TestCase2_1(BaseTestCase):
     def initTestCase(self):
         self.op_type = 'arg_max'
+        self.python_api = paddle.argmax
         self.dims = (3, 4)
         self.dtype = 'int64'
         self.axis = -1
@@ -95,6 +99,7 @@ class TestCase4(BaseTestCase):
 class TestCase3_(BaseTestCase):
     def initTestCase(self):
         self.op_type = 'arg_max'
+        self.python_api = paddle.argmax
         self.dims = (3, )
         self.axis = 0
@@ -152,6 +157,7 @@ class BaseTestComplex1_2(OpTest):
 class BaseTestComplex2_1(OpTest):
     def initTestCase(self):
         self.op_type = 'arg_max'
+        self.python_api = paddle.argmax
         self.dims = (4, 5, 6)
         self.dtype = 'int32'
         self.axis = 2
@@ -202,4 +208,5 @@ class BaseTestComplex2_2(OpTest):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
```
python/paddle/fluid/tests/unittests/test_argsort_op.py

```diff
@@ -23,7 +23,7 @@ import six
 import paddle.fluid.core as core
 from paddle.fluid import ParamAttr
-from paddle.fluid.framework import Program, grad_var_name
+from paddle.fluid.framework import Program, grad_var_name, _test_eager_guard
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
@@ -421,6 +421,16 @@ class TestArgsortImperative(unittest.TestCase):
         expect2 = np.argsort(-self.input_data, axis=self.axis)
         self.assertEqual((expect2 == out2.numpy()).all(), True)

+        with _test_eager_guard():
+            var_x = paddle.to_tensor(self.input_data)
+            out = paddle.argsort(var_x, axis=self.axis)
+            expect = np.argsort(self.input_data, axis=self.axis)
+            self.assertEqual((expect == out.numpy()).all(), True)
+
+            out2 = paddle.argsort(var_x, axis=self.axis, descending=True)
+            expect2 = np.argsort(-self.input_data, axis=self.axis)
+            self.assertEqual((expect2 == out2.numpy()).all(), True)
+
         paddle.enable_static()
@@ -443,4 +453,5 @@ class TestArgsortImperative4(TestArgsortImperative):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
```
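`_test_eager_guard` is the context manager these tests use to run the same assertions under the new eager mode. A condensed sketch of the pattern (assumes the Paddle-internal helper of this era; it was never a stable public API):

```python
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard  # internal helper

data = np.random.rand(2, 3).astype('float32')
expected = np.argsort(data, axis=-1)

# Legacy dygraph path.
out = paddle.argsort(paddle.to_tensor(data), axis=-1)
assert (out.numpy() == expected).all()

# Same check under the eager ("final state") path.
with _test_eager_guard():
    out = paddle.argsort(paddle.to_tensor(data), axis=-1)
    assert (out.numpy() == expected).all()
```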
python/paddle/fluid/tests/unittests/test_bitwise_op.py

```diff
@@ -276,6 +276,7 @@ class TestBitwiseXorBool(TestBitwiseXor):
 class TestBitwiseNot(OpTest):
     def setUp(self):
         self.op_type = "bitwise_not"
+        self.python_api = paddle.bitwise_not
         self.init_dtype()
         self.init_shape()
         self.init_bound()
@@ -288,7 +289,7 @@ class TestBitwiseNot(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=False)

     def test_check_grad(self):
         pass
@@ -351,4 +352,5 @@ class TestBitwiseNotBool(TestBitwiseNot):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
```
python/paddle/fluid/tests/unittests/test_cholesky_op.py

```diff
@@ -36,6 +36,7 @@ from decorator_helper import prog_scope
 class TestCholeskyOp(OpTest):
     def setUp(self):
         self.op_type = "cholesky"
+        self.python_api = paddle.linalg.cholesky
         self._input_shape = (2, 32, 32)
         self._upper = True
         self.init_config()
@@ -54,7 +55,7 @@ class TestCholeskyOp(OpTest):
         self.outputs = {"Out": output_data}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
         places = [fluid.CPUPlace()]
```
python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py

```diff
@@ -109,6 +109,7 @@ class TestCholeskySolveOp(OpTest):
     def setUp(self):
         self.op_type = "cholesky_solve"
+        self.python_api = paddle.linalg.cholesky_solve
         self.config()

         if self.upper:
@@ -125,137 +126,134 @@ class TestCholeskySolveOp(OpTest):
         self.outputs = {'Out': self.output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

-    def test_check_grad_normal(self):
-        self.check_grad(['Y'], 'Out', max_relative_error=0.01)
+    # def test_check_grad_normal(self):
+    #     self.check_grad(['Y'], 'Out', max_relative_error=0.01, check_eager=True)
```

The rest of this hunk re-enables a block of tests that had been disabled by commenting out every line (`TestCholeskySolveOp3`, `TestCholeskySolveAPI`, and `TestCholeskySolveOpError`). The uncommented code reads:

```python
# 3D(broadcast) + 3D, upper=True
class TestCholeskySolveOp3(TestCholeskySolveOp):
    """
    case 3
    """

    def config(self):
        self.y_shape = [1, 10, 10]
        self.x_shape = [2, 10, 5]
        self.upper = True
        self.dtype = np.float64


class TestCholeskySolveAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(2021)
        self.place = [paddle.CPUPlace()]
        # self.place = [paddle.CUDAPlace(0)]
        self.dtype = "float64"
        self.upper = True
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def check_static_result(self, place):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype)
            y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)

            x_np = np.random.random([10, 2]).astype(self.dtype)
            y_np = np.random.random([10, 10]).astype(self.dtype)
            if self.upper:
                umat = np.triu(y_np)
            else:
                umat = np.tril(y_np)
            z_np = cholesky_solution(umat, x_np, upper=self.upper)
            z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"x": x_np,
                                    "y": umat},
                              fetch_list=[z])
            self.assertTrue(np.allclose(fetches[0], z_np))

    def test_static(self):
        for place in self.place:
            self.check_static_result(place=place)

    def test_dygraph(self):
        def run(place):
            paddle.disable_static(place)
            x_np = np.random.random([20, 2]).astype(self.dtype)
            y_np = np.random.random([20, 20]).astype(self.dtype)
            z_np = scipy_cholesky_solution(y_np, x_np, upper=self.upper)

            x = paddle.to_tensor(x_np)
            y = paddle.to_tensor(y_np)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)

            self.assertTrue(np.allclose(z_np, z.numpy()))
            self.assertEqual(z_np.shape, z.numpy().shape)
            paddle.enable_static()

        for idx, place in enumerate(self.place):
            run(place)

    def test_boardcast(self):
        def run(place):
            paddle.disable_static()
            x_np = np.random.random([1, 30, 2]).astype(self.dtype)
            y_np = np.random.random([2, 30, 30]).astype(self.dtype)
            nx_np = np.concatenate((x_np, x_np), axis=0)

            z_sci = scipy_cholesky_solution_batch(y_np, nx_np, upper=self.upper)

            x = paddle.to_tensor(x_np)
            y = paddle.to_tensor(y_np)
            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
            self.assertEqual(z_sci.shape, z.numpy().shape)
            self.assertTrue(np.allclose(z_sci, z.numpy()))

        for idx, place in enumerate(self.place):
            run(place)


class TestCholeskySolveOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input type of solve_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1)

            # The data type of input must be float32 or float64.
            x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
            y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2)

            x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
            y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3)

            x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
            y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4)

            # The number of dimensions of input'X must be >= 2.
            x5 = fluid.data(name="x5", shape=[30], dtype="float64")
            y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5)

            # The number of dimensions of input'Y must be >= 2.
            x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
            y6 = fluid.data(name="y6", shape=[30], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6)

            # The inner-most 2 dimensions of input'X should be equal to each other
            x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
            y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7)
```
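The re-enabled API tests validate `paddle.linalg.cholesky_solve` against SciPy. A condensed, self-contained version of that check (assumes SciPy is installed; `cho_solve` takes the Cholesky factor and the right-hand side):

```python
import numpy as np
import paddle
from scipy.linalg import cho_solve

np.random.seed(2021)
a = np.random.rand(10, 10)
spd = a @ a.T + 10 * np.eye(10)     # symmetric positive-definite matrix
lfac = np.linalg.cholesky(spd)      # lower-triangular Cholesky factor
b = np.random.rand(10, 2)

ref = cho_solve((lfac, True), b)    # SciPy reference, lower=True
z = paddle.linalg.cholesky_solve(
    paddle.to_tensor(b), paddle.to_tensor(lfac), upper=False)
print(np.allclose(ref, z.numpy()))  # True
```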
python/paddle/fluid/tests/unittests/test_softmax_op.py

```diff
@@ -53,6 +53,7 @@ class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "softmax"
+        self.python_api = paddle.nn.functional.softmax
         self.use_cudnn = False
         self.use_mkldnn = False
         # explicilty use float32 for ROCm, as MIOpen does not yet support float64
@@ -81,9 +82,13 @@ class TestSoftmaxOp(OpTest):
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_output_with_place(
-                place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
+                place,
+                atol=1e-5,
+                check_dygraph=(self.use_mkldnn == False),
+                check_eager=False)
         else:
-            self.check_output(check_dygraph=(self.use_mkldnn == False))
+            self.check_output(
+                check_dygraph=(self.use_mkldnn == False), check_eager=False)

     def test_check_grad(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
```
python/paddle/fluid/tests/unittests/test_unfold_op.py

```diff
@@ -21,6 +21,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard


 class TestUnfoldOp(OpTest):
@@ -37,6 +38,7 @@ class TestUnfoldOp(OpTest):
         self.strides = [1, 1]
         self.paddings = [1, 1, 1, 1]
         self.dilations = [1, 1]
+        self.python_api = paddle.nn.functional.unfold
         input_shape = [
             self.batch_size, self.input_channels, self.input_height,
             self.input_width
@@ -95,10 +97,10 @@ class TestUnfoldOp(OpTest):
         self.set_data()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(['X'], 'Y', check_eager=True)


 class TestUnfoldAPI(TestUnfoldOp):
@@ -122,9 +124,18 @@ class TestUnfoldAPI(TestUnfoldOp):
         result = m(input)
         self.assertTrue(np.allclose(result.numpy(), self.outputs['Y']))

+        with _test_eager_guard():
+            input = fluid.dygraph.to_variable(self.inputs['X'])
+            m = paddle.nn.Unfold(**self.attrs)
+            m.eval()
+            result = m(input)
+            self.assertTrue(np.allclose(result.numpy(), self.outputs['Y']))
+
     def test_info(self):
         str(paddle.nn.Unfold(**self.attrs))


 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
```
python/paddle/metric/metrics.py

```diff
@@ -798,8 +798,12 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
             total = _varbase_creator(dtype="int32")

         topk_out, topk_indices = paddle.topk(input, k=k)
+        if _in_eager_mode:
+            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+            return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
         return _acc

     helper = LayerHelper("accuracy", **locals())
```
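As in `metric_op.py`, this eager branch tests the bare `_in_eager_mode` name rather than calling it. The public behavior of the function being patched, for reference:

```python
import paddle

pred = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]])  # two samples, two classes
label = paddle.to_tensor([[1], [0]])
acc = paddle.metric.accuracy(pred, label, k=1)
print(acc.numpy())  # [1.] since both top-1 predictions match the labels
```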
...
python/paddle/nn/functional/activation.py
浏览文件 @
9cd5cd4e
...
@@ -561,6 +561,8 @@ def relu(x, name=None):
...
@@ -561,6 +561,8 @@ def relu(x, name=None):
"""
"""
if
in_dynamic_mode
():
if
in_dynamic_mode
():
if
_in_eager_mode
():
return
_C_ops
.
final_state_relu
(
x
)
return
_C_ops
.
relu
(
x
)
return
_C_ops
.
relu
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'relu'
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'relu'
)
...
@@ -954,6 +956,8 @@ def softmax(x, axis=-1, dtype=None, name=None):
...
@@ -954,6 +956,8 @@ def softmax(x, axis=-1, dtype=None, name=None):
if
in_dynamic_mode
():
if
in_dynamic_mode
():
outs_cast
=
x
if
dtype
is
None
\
outs_cast
=
x
if
dtype
is
None
\
else
_C_ops
.
cast
(
x
,
'in_dtype'
,
x
.
dtype
,
'out_dtype'
,
dtype
)
else
_C_ops
.
cast
(
x
,
'in_dtype'
,
x
.
dtype
,
'out_dtype'
,
dtype
)
if
_in_eager_mode
():
return
_C_ops
.
final_state_softmax
(
outs_cast
,
axis
)
return
_C_ops
.
softmax
(
outs_cast
,
'axis'
,
axis
,
'use_cudnn'
,
use_cudnn
)
return
_C_ops
.
softmax
(
outs_cast
,
'axis'
,
axis
,
'use_cudnn'
,
use_cudnn
)
if
dtype
is
None
:
if
dtype
is
None
:
...
...
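The dispatch change is invisible at the Python level; only the C++ kernel that serves the call differs. For reference:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.0, 2.0])
print(F.relu(x).numpy())              # [0. 0. 2.]
print(F.softmax(x, axis=-1).numpy())  # probabilities summing to 1
```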
python/paddle/tensor/linalg.py

```diff
@@ -14,7 +14,7 @@
 import numpy as np
 from ..fluid.layer_helper import LayerHelper
-from ..framework import _varbase_creator, _dygraph_tracer
+from ..framework import _varbase_creator, _dygraph_tracer, _in_eager_mode
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
 from ..static import Variable
@@ -1205,6 +1205,8 @@ def cholesky(x, upper=False, name=None):
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_cholesky(x, upper)
         return _C_ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
@@ -2734,6 +2736,8 @@ def cholesky_solve(x, y, upper=False, name=None):
             # [-2.5, -7, 9.5]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_cholesky_solve(x, y, upper)
         return _C_ops.cholesky_solve(x, y, 'upper', upper)
     helper = LayerHelper("cholesky_solve", **locals())
```
python/paddle/tensor/logic.py

```diff
@@ -17,6 +17,7 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..fluid.layers.layer_function_generator import templatedoc
 from ..static import Variable
 from ..framework import VarBase as Tensor
+from paddle.fluid.framework import _in_eager_mode
 # TODO: define logic functions of a tensor
 from ..fluid.layers import is_empty  # noqa: F401
@@ -452,6 +453,8 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
         if binary_op:
             return op(x, y)
         else:
+            if _in_eager_mode():
+                return _C_op.final_state_bitewise_not(x)
             return op(x)

     check_variable_and_dtype(
```
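In the new eager branch, `_C_op.final_state_bitewise_not` appears to misspell both the module (`_C_ops`) and the op name (`bitwise`), so the branch would raise a `NameError` if reached. The public unary op it guards, for reference:

```python
import paddle

x = paddle.to_tensor([True, False])
print(paddle.bitwise_not(x).numpy())   # [False  True]

y = paddle.to_tensor([5], dtype='int32')
print(paddle.bitwise_not(y).numpy())   # [-6], two's complement of 5
```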
python/paddle/tensor/math.py

```diff
@@ -2853,6 +2853,8 @@ def tanh(x, name=None):
             # [-0.37994896 -0.19737532  0.09966799  0.29131261]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_tanh(x)
         return _C_ops.tanh(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
```
python/paddle/tensor/search.py

```diff
@@ -91,6 +91,8 @@ def argsort(x, axis=-1, descending=False, name=None):
             #   [0 2 1 1]]]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            _, ids, = _C_ops.final_state_argsort(x, axis, descending)
         _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return ids

     check_variable_and_dtype(
@@ -245,6 +247,9 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0

     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            out = _C_ops.final_state_arg_min(x, axis, keepdim, flattern,
+                                             var_dtype)
         out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
         return out
```
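Two details in these branches are worth flagging: the eager `argsort` result is computed but never returned, so control falls through and the legacy call overwrites `ids`; and the eager `argmin` call passes `flattern`, a name not defined in the function (the legacy call below spells it `flatten`). The public APIs, for reference:

```python
import paddle

x = paddle.to_tensor([[3.0, 1.0, 2.0]])
print(paddle.argsort(x, axis=-1).numpy())               # [[1 2 0]]
print(paddle.argmin(x, axis=-1, keepdim=True).numpy())  # [[1]]
```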
python/paddle/utils/code_gen/api.yaml
浏览文件 @
9cd5cd4e
...
@@ -262,56 +262,59 @@
...
@@ -262,56 +262,59 @@
# func : maxout
# func : maxout
# backward : maxout_grad
# backward : maxout_grad
# # put_along_axis
# put_along_axis
# - api : put_along_axis
-
api
:
put_along_axis
# args : (Tensor x, Tensor index, Tensor value, int axis, string reduce)
args
:
(Tensor x, Tensor index, Tensor value, int axis, str reduce)
# output : Tensor
output
:
Tensor
# infer_meta :
infer_meta
:
# func : PutAlongAxisInferMeta
func
:
UnchangedInferMeta
# kernel :
param
:
[
index
]
# func : put_along_axis
kernel
:
# backward : put_along_axis_grad
func
:
put_along_axis
backward
:
put_along_axis_grad
# # take_along_axis
# take_along_axis
# - api : take_along_axis
-
api
:
take_along_axis
# args : (Tensor x, Tensor index, int axis)
args
:
(Tensor x, Tensor index, int axis)
# output : Tensor
output
:
Tensor
# infer_meta :
infer_meta
:
# func : TakeAlongAxisInferMeta
func
:
UnchangedInferMeta
# kernel :
param
:
[
index
]
# func : take_along_axis
kernel
:
# backward : take_along_axis_grad
func
:
take_along_axis
backward
:
take_along_axis_grad
# # matrix_power
# matrix_power
# - api : maxtrix_power
-
api
:
matrix_power
# args : (Tensor x, int n)
args
:
(Tensor x, int n)
# output : Tensor
output
:
Tensor
# infer_meta :
infer_meta
:
# func : MaxtrixPowerInferMeta
func
:
UnchangedInferMeta
# kernel :
param
:
[
x
]
# func : maxtrix_power
kernel
:
# backward : maxtrix_power_grad
func
:
matrix_power
backward
:
matrix_power_grad
#
#
eigh
# eigh
#
- api : eigh
-
api
:
eigh
# args : (Tensor x, string
uplo)
args
:
(Tensor x, str
uplo)
#
output : Tensor(out_w), Tensor(out_v)
output
:
Tensor(out_w), Tensor(out_v)
#
infer_meta :
infer_meta
:
#
func : EighInferMeta
func
:
EighInferMeta
#
kernel :
kernel
:
#
func : eigh
func
:
eigh
#
backward : eigh_grad
backward
:
eigh_grad
#
#
segment_pool
# segment_pool
#
- api : segment_pool
-
api
:
segment_pool
# args : (Tensor x, Tensor segment_ids, string
pooltype)
args
:
(Tensor x, Tensor segment_ids, str
pooltype)
#
output : Tensor(out), Tensor(summed_ids)
output
:
Tensor(out), Tensor(summed_ids)
#
infer_meta :
infer_meta
:
#
func : SegmentPoolInferMeta
func
:
SegmentPoolInferMeta
#
kernel :
kernel
:
#
func : segment_pool
func
:
segment_pool
#
backward : segment_pool_grad
backward
:
segment_pool_grad
# accuracy
# accuracy
-
api
:
accuracy
-
api
:
accuracy
...
@@ -447,24 +450,24 @@
...
@@ -447,24 +450,24 @@
# arg_min # int64 ???? dtype
# arg_min # int64 ???? dtype
-
api
:
a
gr
min
-
api
:
a
rg_
min
args
:
(Tensor x, int axis, bool keepdims, bool flatten, int dtype)
args
:
(Tensor x, int
64
axis, bool keepdims, bool flatten, int dtype)
output
:
Tensor
output
:
Tensor
infer_meta
:
infer_meta
:
func
:
ArgMinMaxInferMeta
func
:
ArgMinMaxInferMeta
kernel
:
kernel
:
func
:
argmin
func
:
arg
_
min
# arg_max # int64 ???? dtype
# arg_max # int64 ???? dtype
-
api
:
a
gr
max
-
api
:
a
rg_
max
args
:
(Tensor x, int axis, bool keepdims, bool flatten, int dtype)
args
:
(Tensor x, int
64
axis, bool keepdims, bool flatten, int dtype)
output
:
Tensor
output
:
Tensor
infer_meta
:
infer_meta
:
func
:
ArgMinMaxInferMeta
func
:
ArgMinMaxInferMeta
kernel
:
kernel
:
func
:
argmax
func
:
arg
_
max
# argsort
; kernel why input?
# argsort
-
api
:
argsort
-
api
:
argsort
args
:
(Tensor x, int axis, bool descending)
args
:
(Tensor x, int axis, bool descending)
output
:
Tensor(out), Tensor(indices)
output
:
Tensor(out), Tensor(indices)
...
@@ -472,19 +475,20 @@
...
@@ -472,19 +475,20 @@
func
:
ArgsortInferMeta
func
:
ArgsortInferMeta
kernel
:
kernel
:
func
:
argsort
func
:
argsort
backward
:
argsort_grad
# auc
; kernel why input?
# auc
#
- api : auc
-
api
:
auc
#
args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
args
:
(Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
#
output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
output
:
Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
#
infer_meta :
infer_meta
:
#
func : AucInferMeta
func
:
AucInferMeta
#
kernel :
kernel
:
#
func : auc
func
:
auc
# # batch_norm
# # batch_norm
# - api : batch_norm
# - api : batch_norm
# args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str
ing
data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
# args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
# output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
# output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
# infer_meta :
# infer_meta :
# func : XXXXInferMeta
# func : XXXXInferMeta
...
@@ -492,14 +496,14 @@
...
@@ -492,14 +496,14 @@
# func : batch_norm
# func : batch_norm
# backward: batch_norm_grad
# backward: batch_norm_grad
#
#
bernoulli
# bernoulli
#
- api : bernoulli
-
api
:
bernoulli
#
args : (Tensor x)
args
:
(Tensor x)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
# func : Bernoulli
InferMeta
func
:
Unchanged
InferMeta
#
kernel :
kernel
:
#
func : bernoulli
func
:
bernoulli
# # bilinear_tensor_product ?? optional
# # bilinear_tensor_product ?? optional
# - api : bilinear_tensor_product
# - api : bilinear_tensor_product
...
@@ -510,15 +514,18 @@
...
@@ -510,15 +514,18 @@
# kernel :
# kernel :
# func : bilinear_tensor_product
# func : bilinear_tensor_product
# backward : bilinear_tensor_product_grad
# backward : bilinear_tensor_product_grad
# optional : bias
# bincount
-
api
:
bincount
args
:
(Tensor x, Tensor weight, int minlength)
output
:
Tensor
infer_meta
:
func
:
BincountInferMeta
kernel
:
func
:
bincount
optional
:
weight
# bincount ?? optional
# - api : bincount
# args : (Tensor x, Tensor weight, int minlength)
# output : Tensor
# infer_meta :
# func : BincountInferMeta
# kernel :
# func : bincount
# bitwise_and
# bitwise_and
...
@@ -604,7 +611,7 @@
...
@@ -604,7 +611,7 @@
# # depthwise_conv2d
# # depthwise_conv2d
# # dropout ?? optional, intermediate
# # dropout ?? optional, intermediate
# - api : dropout
# - api : dropout
# args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str
ing
mode, int seed, bool fix_seed)
# args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
# output : Tensor(out), Tensor(mask)
# output : Tensor(out), Tensor(mask)
# infer_meta :
# infer_meta :
# func : DropoutInferMeta
# func : DropoutInferMeta
...
@@ -613,27 +620,27 @@
...
@@ -613,27 +620,27 @@
# # embedding
# # embedding
#
#
erf
# erf
#
- api : erf
-
api
:
erf
#
args : (Tensor x)
args
:
(Tensor x)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
# func : Erf
InferMeta
func
:
Unchanged
InferMeta
#
kernel :
kernel
:
#
func : erf
func
:
erf
#
backward : erf_grad
backward
:
erf_grad
#
#
erfinv
# erfinv
#
- api : erfinv
-
api
:
erfinv
#
args : (Tensor x)
args
:
(Tensor x)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
# func : Erfinv
InferMeta
func
:
Unchanged
InferMeta
#
kernel :
kernel
:
#
func : erfinv
func
:
erfinv
#
backward : erfinv_grad
backward
:
erfinv_grad
#
# expand_as ?? optional
#
expand_as
# - api : expand_as
# - api : expand_as
# args : (Tensor x, Tensor y, int[] shape)
# args : (Tensor x, Tensor y, int[] shape)
# output : Tensor
# output : Tensor
...
@@ -642,6 +649,7 @@
...
@@ -642,6 +649,7 @@
# kernel :
# kernel :
# func : expand_as
# func : expand_as
# backward : expand_as_grad
# backward : expand_as_grad
# optional : y
# # expand
# # expand
...
@@ -655,13 +663,13 @@
...
@@ -655,13 +663,13 @@
# backward : expand_grad
# backward : expand_grad
# eye
# eye
-
api
:
eye
#
- api : eye
args
:
(int64 num_rows, int64 num_colums, DataType dtype
)
# args : (int64 num_rows, int64 num_colums, DataType dtype = DataType::FLOAT32
)
output
:
Tensor
#
output : Tensor
infer_meta
:
#
infer_meta :
func
:
EyeInferMeta
#
func : EyeInferMeta
kernel
:
#
kernel :
func
:
eye
#
func : eye
# # flip
# # flip
# - api : flip
# - api : flip
...
@@ -674,16 +682,18 @@
...
@@ -674,16 +682,18 @@
# gaussian_random
# gaussian_random
# - api : gaussian_random
# - api : gaussian_random
# args : (Scala
yArray shape, float mean, float std, int seed, DataType dtype
)
# args : (Scala
rArray shape, float mean, float std, int seed, DataType dtype=DataType::FLOAT32
)
# output : Tensor
# output : Tensor
# infer_meta :
# infer_meta :
# func : GaussianRandomInferMeta
# func : CreateInferMeta
# param : [shape, dtype]
# kernel :
# kernel :
# func : gaussian_random
# func : gaussian_random
# data_type : dtype
# # graph_send_recv
# # graph_send_recv
# - api : graph_send_recv
# - api : graph_send_recv
# args : (Tensor x, Tensor src_index, Tensor dst_index, str
ing
pool_type)
# args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type)
# output : Tensor(out), Tensor(dst_count)
# output : Tensor(out), Tensor(dst_count)
# infer_meta :
# infer_meta :
# func : GraphSendRecvInferMeta
# func : GraphSendRecvInferMeta
...
@@ -693,30 +703,30 @@
...
@@ -693,30 +703,30 @@
# # histogram int64 ???
# # histogram int64 ???
# - api : histogram
# - api : histogram
# args : (Tensor x, int64
_t
bins, int min, int max)
# args : (Tensor x, int64 bins, int min, int max)
# output : Tensor
# output : Tensor
# infer_meta :
# infer_meta :
# func : HistogramInferMeta
# func : HistogramInferMeta
# kernel :
# kernel :
# func : histogram
# func : histogram
#
#
increment
# increment
#
- api : increment
-
api
:
increment
#
args : (Tensor x, float value)
args
:
(Tensor x, float value)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
#
func : IncrementInferMeta
func
:
IncrementInferMeta
#
kernel :
kernel
:
#
func : increment
func
:
increment
#
#
is_empty
# is_empty
#
- api : is_empty
-
api
:
is_empty
#
args : (Tensor x)
args
:
(Tensor x)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
#
func : IsEmptyInferMeta
func
:
IsEmptyInferMeta
#
kernel :
kernel
:
#
func : is_empty
func
:
is_empty
# # isinf selected rows??? involk
# # isinf selected rows??? involk
# - api : isinf
# - api : isinf
...
@@ -727,7 +737,7 @@
...
@@ -727,7 +737,7 @@
# kernel :
# kernel :
# func : isinf
# func : isinf
# # isnan
# # isnan
selected rows??? involk
# - api : isnan
# - api : isnan
# args : (Tensor x)
# args : (Tensor x)
# output : Tensor
# output : Tensor
...
@@ -736,7 +746,7 @@
...
@@ -736,7 +746,7 @@
# kernel :
# kernel :
# func : isnan
# func : isnan
# # isfinite
# # isfinite
selected rows??? involk
# - api : isfinite
# - api : isfinite
# args : (Tensor x)
# args : (Tensor x)
# output : Tensor
# output : Tensor
...
@@ -745,34 +755,37 @@
...
@@ -745,34 +755,37 @@
# kernel :
# kernel :
# func : isfinite
# func : isfinite
#
#
label_smooth ?? optional
# label_smooth ?? optional
# - api : label_smooth
# - api : label_smooth
# args : (Tensor label, Tensor prior_dist, float epsilon)
# args : (Tensor label, Tensor prior_dist, float epsilon)
# output : Tensor
# output : Tensor
# infer_meta :
# infer_meta :
# func : LabelSmoothInferMeta
# func : UnchangedInferMeta
# param : [label]
# kernel :
# kernel :
# func : label_smooth
# func : label_smooth
# backward : label_smooth_grad
# backward : label_smooth_grad
# optional : prior_dist
#
# linspace
#
linspace ???? start stop number,应该是sclar?
# - api : linspace
# - api : linspace
# args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
# args : (Tensor start, Tensor stop, Tensor number, DataType dtype
=DataType::FLOAT32
)
# output : Tensor
# output : Tensor
# infer_meta :
# infer_meta :
# func : LinspaceInferMeta
# func : LinspaceInferMeta
# kernel :
# kernel :
# func : linspace
# func : linspace
# # log_loss
# - api : log_loss
# log_loss
# args : (Tensor input, Tensor label, float epsilon)
-
api
:
log_loss
# output : Tensor
args
:
(Tensor input, Tensor label, float epsilon)
# infer_meta :
output
:
Tensor
# func : LogLossInferMeta
infer_meta
:
# kernel :
func
:
LogLossInferMeta
# func : log_loss
kernel
:
# backward : log_loss
func
:
log_loss
backward
:
log_loss_grad
# # logical_and
# # logical_and
# - api : logical_and
# - api : logical_and
...
@@ -802,14 +815,14 @@
...
@@ -802,14 +815,14 @@
# kernel :
# kernel :
# func : logical_xor
# func : logical_xor
#
#
logical_not
# logical_not
#
- api : logical_not
-
api
:
logical_not
#
args : (Tensor x)
args
:
(Tensor x)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
#
func : UnchangedInferMeta
func
:
UnchangedInferMeta
#
kernel :
kernel
:
#
func : logical_not
func
:
logical_not
# # masked_select
# # masked_select
# - api : masked_select
# - api : masked_select
...
@@ -831,24 +844,25 @@
...
@@ -831,24 +844,25 @@
# func : multi_dot
# func : multi_dot
# backward : multi_dot_grad
# backward : multi_dot_grad
#
#
multinomial
# multinomial
#
- api : multinomial
-
api
:
multinomial
#
args : (Tensor x, int num_samples, bool replacement)
args
:
(Tensor x, int num_samples, bool replacement)
#
output : Tensor
output
:
Tensor
#
infer_meta :
infer_meta
:
#
func : MultinomialInferMeta
func
:
MultinomialInferMeta
#
kernel :
kernel
:
#
func : multinomial
func
:
multinomial
#
#
nll_loss ?? optional
# nll_loss ?? optional
# - api : nll_loss
# - api : nll_loss
# args : (Tensor
input, Tensor label, Tensor weight, int64_t ignore_index, string
reduction)
# args : (Tensor
x, Tensor label, Tensor weight, int64 ignore_index, str
reduction)
# output : Tensor(out), Tensor(total_weight)
# output : Tensor(out), Tensor(total_weight)
# infer_meta :
# infer_meta :
# func : NllLossInferMeta
# func : NllLoss
Raw
InferMeta
# kernel :
# kernel :
# func : nll_loss
# func : nll_loss
# backward : nll_loss_grad
# backward : nll_loss_grad
# optional : weight
# # pad
# # pad
# - api : pad
# - api : pad
...
@@ -862,7 +876,7 @@
...
@@ -862,7 +876,7 @@
# # pixel_shuffle
# # pixel_shuffle
# - api : pixel_shuffle
# - api : pixel_shuffle
# args : (Tensor x, int upscale_factor, str
ing
data_format)
# args : (Tensor x, int upscale_factor, str data_format)
# output : Tensor
# output : Tensor
# infer_meta :
# infer_meta :
# func : PixelShuffleInferMeta
# func : PixelShuffleInferMeta
...
@@ -880,7 +894,7 @@
...
@@ -880,7 +894,7 @@
# func : poisson
# func : poisson
# backward : poisson_grad
# backward : poisson_grad
#
#
psroi_pool ?? optional
# psroi_pool ?? optional
# - api : psroi_pool
# - api : psroi_pool
# args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
# args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
# output : Tensor
# output : Tensor
...
@@ -889,6 +903,7 @@
...
@@ -889,6 +903,7 @@
# kernel :
# kernel :
# func : psroi_pool
# func : psroi_pool
# backward : psroi_pool_grad
# backward : psroi_pool_grad
# optional : rois_num
# # randint_raw
# # randint_raw
# - api : randint
# - api : randint
...
@@ -926,57 +941,59 @@
...
@@ -926,57 +941,59 @@
# kernel :
# kernel :
# func : reduce_prod
# func : reduce_prod
# # selu
# selu
# - api : selu
-
api
:
selu
# args : (Tensor x, float scale, float alpha)
args
:
(Tensor x, float scale, float alpha)
# output : Tensor
output
:
Tensor
# infer_meta :
infer_meta
:
# func : SeluInferMeta
func
:
UnchangedInferMeta
# kernel :
param
:
[
x
]
# func : selu
kernel
:
# backward : selu_grad
func
:
selu
backward
:
selu_grad
 # # set_value None api
 # # sgd # need invoke
 # # shape ??? selcted rows
-# # shard_index
-# - api : shard_index
-#   args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
-#   output : Tensor
-#   infer_meta :
-#     func : ShardIndexInferMeta
-#   kernel :
-#     func : shard_index
+# shard_index
+- api : shard_index
+  args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
+  output : Tensor
+  infer_meta :
+    func : ShardIndexInferMeta
+  kernel :
+    func : shard_index
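A quick illustration of the `shard_index` semantics declared above (a sketch; the printed values assume the documented behavior of `paddle.shard_index`):

import paddle

labels = paddle.to_tensor([[16], [1]], dtype='int64')
# 20 ids split over 2 shards of 10; ids owned by other shards map to ignore_value
out = paddle.shard_index(labels, index_num=20, nshards=2, shard_id=0, ignore_value=-1)
print(out.numpy())  # [[-1], [1]]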
-# # sigmoid_cross_entropy_with_logits
-# - api : sigmoid_cross_entropy_with_logits
-#   args : (Tensor x, Tensor label, bool normalize, int ignore_index)
-#   output : Tensor
-#   infer_meta :
-#     func : SigmoidCrossEntropyWithoLgitsInferMeta
-#   kernel :
-#     func : sigmoid_cross_entropy_with_logits
+# sigmoid_cross_entropy_with_logits
+- api : sigmoid_cross_entropy_with_logits
+  args : (Tensor x, Tensor label, bool normalize, int ignore_index)
+  output : Tensor
+  infer_meta :
+    func : SigmoidCrossEntropyWithLogitsInferMeta
+  kernel :
+    func : sigmoid_cross_entropy_with_logits
+  backward : sigmoid_cross_entropy_with_logits_grad
-# # size
-# - api : size
-#   args : (Tensor x)
-#   output : Tensor
-#   infer_meta :
-#     func : SizeInferMeta
-#   kernel :
-#     func : size
+# size
+- api : size
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : SizeInferMeta
+  kernel :
+    func : size
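The `size` kernel backs `paddle.numel`; `SizeInferMeta` pins the output to a single int64 element regardless of input shape. Sketch (not from the diff):

import paddle

x = paddle.zeros([3, 4])
n = paddle.numel(x)  # Tensor holding 12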
-# # tile
-# - api : tile
-#   args : (Tensor x, ScalarArray repeat_times)
-#   output : Tensor
-#   infer_meta :
-#     func : TileInferMeta
-#   kernel :
-#     func : tile
-#   backward : tile_grad
+# tile
+- api : tile
+  args : (Tensor x, ScalarArray repeat_times)
+  output : Tensor
+  infer_meta :
+    func : TileInferMeta
+  kernel :
+    func : tile
+  backward : tile_grad
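`repeat_times` is declared as `ScalarArray`, so the generated API accepts a Python list/tuple or a Tensor. Sketch:

import paddle

x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.tile(x, repeat_times=[2, 1])  # list form of the ScalarArray argument
print(y.shape)  # [4, 2]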
 # # top_k
 # - api : top_k
...
@@ -988,15 +1005,15 @@
 #     func : top_k
 #   backward : top_k_grad
-# # trace
-# - api : trace
-#   args : (Tensor x, int offset, int axis1, int axis2)
-#   output : Tensor
-#   infer_meta :
-#     func : TraceInferMeta
-#   kernel :
-#     func : trace
-#   backward : trace_grad
+# trace
+- api : trace
+  args : (Tensor x, int offset, int axis1, int axis2)
+  output : Tensor
+  infer_meta :
+    func : TraceInferMeta
+  kernel :
+    func : trace
+  backward : trace_grad
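Sketch of the forward API this entry backs (illustration only):

import paddle

x = paddle.arange(9, dtype='float32').reshape([3, 3])
t = paddle.trace(x, offset=0, axis1=0, axis2=1)  # sums the main diagonal: 0 + 4 + 8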
 # # phi_transfer_layout | not have python api
...
@@ -1018,18 +1035,19 @@
 #   kernel :
 #     func : unbind
-# # unfold
-# - api : unfold
-#   args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
-#   output : Tensor
-#   infer_meta :
-#     func : UnfoldInferMeta
-#   kernel :
-#     func : unfold
-#   backward : unfold_grad
+# unfold
+- api : unfold
+  args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
+  output : Tensor
+  infer_meta :
+    func : UnfoldInferMeta
+  kernel :
+    func : unfold
+  backward : unfold_grad
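`unfold` is the im2col operation; `UnfoldInferMeta` computes the `[N, C*prod(kernel_sizes), L]` output shape from the conv-style hyperparameters. Sketch:

import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 8, 8])
cols = F.unfold(x, kernel_sizes=[2, 2], strides=[2, 2], paddings=[0, 0], dilations=[1, 1])
print(cols.shape)  # [1, 12, 16]: 3*2*2 values per patch, 16 patches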
 # # uniform_random_raw selected rows ??
-# # viterbi_decode
+# viterbi_decode
 # - api : viterbi_decode
 #   args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
 #   output : Tensor(scores), Tensor(path)
...
@@ -1038,14 +1056,14 @@
 #   kernel :
 #     func : viterbi_decode
-# # where_index
-# - api : where_index
-#   args : (Tensor condition)
-#   output : Tensor
-#   infer_meta :
-#     func : WhereIndexInferMeta
-#   kernel :
-#     func : where_index
+# where_index
+- api : where_index
+  args : (Tensor condition)
+  output : Tensor
+  infer_meta :
+    func : WhereIndexInferMeta
+  kernel :
+    func : where_index
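`where_index` is the kernel behind `paddle.nonzero`, returning the coordinates of true elements; the output shape is data-dependent, which is why the entry carries no backward. Sketch:

import paddle

cond = paddle.to_tensor([[True, False], [False, True]])
coords = paddle.nonzero(cond)
print(coords.numpy())  # [[0 0], [1 1]]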
 # # yolo_box
...

python/paddle/utils/code_gen/backward.yaml
View file @ 9cd5cd4e

...
@@ -112,55 +112,56 @@
 #     func : maxout_grad
-# - backward_api : put_along_axis_grad   # output is optional, how to handle?
-#   forward : put_along_axis (Tensor x, Tensor index, Tensor value, int axis, string reduce) -> Tensor(out)
-#   args : (Tensor x, Tensor index, Tensor out_grad, int axis, string reduce)
-#   output : Tensor(x_grad), Tensor(value_grad)
-#   infer_meta :
-#     func : GeneralBinaryGradInferMeta
-#     param : [x, valule]
-#   kernel :
-#     func : put_along_axis_grad
+- backward_api : put_along_axis_grad
+  forward : put_along_axis (Tensor x, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, int axis, str reduce)
+  output : Tensor(x_grad), Tensor(value_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, index]
+  kernel :
+    func : put_along_axis_grad
-# - backward_api : take_along_axis_grad
-#   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
-#   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : take_along_axis_grad
+- backward_api : take_along_axis_grad
+  forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : take_along_axis_grad
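A quick way to exercise the newly enabled `take_along_axis_grad` (and, analogously, `put_along_axis_grad`) in dynamic-graph mode (a sketch, not from the diff):

import paddle

x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]], stop_gradient=False)
idx = paddle.to_tensor([[0], [1]], dtype='int64')
out = paddle.take_along_axis(x, idx, axis=1)
out.sum().backward()   # routes through take_along_axis_grad
print(x.grad.numpy())  # ones at the gathered positions, zeros elsewhere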
-# - backward_api : maxtrix_power_grad
-#   forward : maxtrix_power (Tensor x, int n) -> Tensor(out)
-#   args : (Tensor x, Tensor out, Tensor out_grad, int n)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : maxtrix_power_grad
+- backward_api : matrix_power_grad
+  forward : matrix_power (Tensor x, int n) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int n)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : matrix_power_grad
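Besides fixing the `maxtrix_power` typo, this enables the backward; a sketch of a call that reaches it (illustration only):

import paddle

x = paddle.to_tensor([[2., 0.], [0., 2.]], stop_gradient=False)
y = paddle.linalg.matrix_power(x, 2)
y.sum().backward()  # matrix_power_grad consumes x, out and out_grad, per the args above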
-# - backward_api : eigh_grad
-#   forward : eigh (Tensor x, string uplo) -> Tensor(out_w), Tensor(out_v)
-#   args : (Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [out_v]
-#   kernel :
-#     func : eigh_grad
+- backward_api : eigh_grad
+  forward : eigh (Tensor x, str uplo) -> Tensor(out_w), Tensor(out_v)
+  args : (Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_v]
+  kernel :
+    func : eigh_grad
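`param : [out_v]` means the grad infer-meta takes its metadata from the eigenvector output. A sketch of the forward API (not from the diff):

import paddle

x = paddle.to_tensor([[2.0, 0.0], [0.0, 3.0]])
w, v = paddle.linalg.eigh(x, UPLO='L')  # eigenvalues out_w, eigenvectors out_v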
-# - backward_api : segment_pool_grad
-#   forward : segment_pool (Tensor x, Tensor segment_ids, string pooltype) -> Tensor(out), Tensor(summed_ids)
-#   args : (Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tenosr out_grad, string pooltype)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : segment_pool_grad
+- backward_api : segment_pool_grad
+  forward : segment_pool (Tensor x, Tensor segment_ids, str pooltype) -> Tensor(out), Tensor(summed_ids)
+  args : (Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tensor out_grad, str pooltype)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : segment_pool_grad
 - backward_api : cos_grad
   forward : cos (Tensor x) -> Tensor(out)
...
@@ -282,19 +283,19 @@
   kernel :
     func : relu_grad
-# - backward_api : argsort_grad
-#   forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), tensor(indices)
-#   args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : argsort_grad
+- backward_api : argsort_grad
+  forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), Tensor(indices)
+  args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : argsort_grad
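`argsort_grad` receives `indices` first so it can scatter `out_grad` back through the sort permutation. `paddle.sort` is backed by the same argsort kernel, so a sketch that reaches the backward:

import paddle

x = paddle.to_tensor([3.0, 1.0, 2.0], stop_gradient=False)
y = paddle.sort(x)
y.sum().backward()
print(x.grad.numpy())  # [1. 1. 1.]: the permutation is undone in the backward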
 # - backward_api : batch_norm_grad
-#   forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, string data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
+#   forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
 #   args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
 #   output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
 #   infer_meta :
...
@@ -312,6 +313,7 @@
 #     param : [x, y, weight, bias]
 #   kernel :
 #     func : bilinear_tensor_product_grad
+#   optional : bias
 # - backward_api : broadcast_tensor_grad
 #   forward : broadcast_tensors (Tensor[] x) -> Tensor [] (out)
...
@@ -344,8 +346,8 @@
     func : cholesky_solve_grad
 # - backward_api : dropout_grad
-#   forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, string mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
+#   forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
-#   args : (Tensor mask, Tensor out_grad, float p, bool is_test, string mode)
+#   args : (Tensor mask, Tensor out_grad, float p, bool is_test, str mode)
 #   output : Tensor(x_grad)
 #   infer_meta :
 #     func : UnchangedInferMeta
...
@@ -354,25 +356,25 @@
 #     func : dropout_grad
-# - backward_api : erf_grad
-#   forward : erf (Tensor x) -> Tensor(out)
-#   args : (Tensor x, Tensor out, Tensor out_grad)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : erf_grad
+- backward_api : erf_grad
+  forward : erf (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : erf_grad
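`param : [x]` matches d erf(x)/dx = (2/sqrt(pi)) * exp(-x^2), which needs `x` rather than `out`. Sketch:

import paddle

x = paddle.to_tensor([0.0], stop_gradient=False)
paddle.erf(x).backward()
print(x.grad.numpy())  # [1.1283791], i.e. 2/sqrt(pi), the erf derivative at 0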
-# - backward_api : erfinv_grad
-#   forward : erf (Tensor x) -> Tensor(out)
-#   args : (Tensor out, Tensor out_grad)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : ErfinvGradInferMeta
-#     param : [out]
-#   kernel :
-#     func : erfinv_grad
+- backward_api : erfinv_grad
+  forward : erf (Tensor x) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : erfinv_grad
 # - backward_api : expand_as_grad
 #   forward : expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
...
@@ -395,11 +397,11 @@
 #     func : expand_grad
 # - backward_api : graph_send_recv_grad
-#   forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, string pool_type) -> Tensor(out), Tensor(dst_count)
+#   forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type) -> Tensor(out), Tensor(dst_count)
-#   args : (Tensor out_grad, Tensor x, Tensor out, Tensor src_index, Tensor dst_index, Tensor dst_count, string pool_type)
+#   args : (Tensor out_grad, Tensor x, Tensor out, Tensor src_index, Tensor dst_index, Tensor dst_count, str pool_type)
 #   output : Tensor(x_grad)
 #   infer_meta :
-#     func : UnchangedGradInferMeta
+#     func : UnchangedInferMeta
 #     param : [x]
 #   kernel :
 #     func : graph_send_recv_grad
...
@@ -407,22 +409,22 @@
 # - backward_api : label_smooth_grad
 #   forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
 #   args : (Tensor out_grad, float epsilon)
-#   output : Tensor(x_grad)
+#   output : Tensor(label_grad)
 #   infer_meta :
-#     func : XXXXInferMeta
+#     func : UnchangedInferMeta
-#     param : [x]
+#     param : [out_grad]
 #   kernel :
 #     func : label_smooth_grad
-# - backward_api : log_loss_grad
-#   forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
-#   args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
-#   output : Tensor(input_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : log_loss_grad
+- backward_api : log_loss_grad
+  forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
+  args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
+  output : Tensor(input_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [input]
+  kernel :
+    func : log_loss_grad
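The `param` fix lines up with the entry's actual input name, `input`. A sketch of the public API it serves (not part of the diff):

import paddle
import paddle.nn.functional as F

prob = paddle.to_tensor([[0.7]], stop_gradient=False)
label = paddle.to_tensor([[1.0]])
loss = F.log_loss(prob, label, epsilon=1e-4)  # -label*log(prob+eps) - (1-label)*log(1-prob+eps)
loss.backward()  # log_loss_grad, differentiating w.r.t. `input`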
 # - backward_api : masked_selecte_grad
 #   forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
...
@@ -445,8 +447,8 @@
 #     func : multi_dot_grad
 # - backward_api : nll_loss_grad
-#   forward : nll_loss (Tensor input, Tensor label, Tensor weight, int64_t ignore_index, string reduction) -> Tensor(out), Tensor(total_weight)
+#   forward : nll_loss (Tensor x, Tensor label, Tensor weight, int64 ignore_index, str reduction) -> Tensor(out), Tensor(total_weight)
-#   args : (Tensor x, Tensor label, Tensor total_weight, Tensor weight, Tensor out_grad, int64_t ignore_index, string reduction)
+#   args : (Tensor x, Tensor label, Tensor total_weight, Tensor weight, Tensor out_grad, int64 ignore_index, str reduction)
 #   output : Tensor[] (x_grad)
 #   infer_meta :
 #     func : UnchangedInferMeta
...
@@ -465,8 +467,8 @@
 #     func : pad_grad
 # - backward_api : pixel_shuffle_grad
-#   forward : pixel_shuffle (Tensor x, int upscale_factor, string data_format) -> Tensor(out)
+#   forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
-#   args : (Tensor out_grad, int upscale_factor, string data_format)
+#   args : (Tensor out_grad, int upscale_factor, str data_format)
 #   output : Tensor(x_grad)
 #   infer_meta :
 #     func : XXXXXInferMeta
...
@@ -493,36 +495,37 @@
 #     param : [x]
 #   kernel :
 #     func : psroi_pool_grad
+#   optional : rois_num
-# - backward_api : selu_grad
-#   forward : selu (Tensor x, float scale, float alpha) -> Tensor(out)
-#   args : (Tensor out, Tensor out_grad, float scale, float alpha)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : XXXXXInferMeta
-#     param : [x]
-#   kernel :
-#     func : selu_grad
+- backward_api : selu_grad
+  forward : selu (Tensor x, float scale, float alpha) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, float scale, float alpha)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : selu_grad
-# - backward_api : sigmoid_cross_entropy_with_logits_grad
-#   forward : sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
-#   args : (Tensor x, Tensor label, Tensor out_grad, bool normalize, int ingore_index)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : sigmoid_cross_entropy_with_logits_grad
+- backward_api : sigmoid_cross_entropy_with_logits_grad
+  forward : sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
+  args : (Tensor x, Tensor label, Tensor out_grad, bool normalize, int ignore_index)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sigmoid_cross_entropy_with_logits_grad
-# - backward_api : tile_grad
-#   forward : tile (Tensor x, ScalarArray repeat_times) -> Tensor(out)
-#   args : (Tensor x, Tensor out_grad, ScalarArray repeat_times)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : tile_grad
+- backward_api : tile_grad
+  forward : tile (Tensor x, ScalarArray repeat_times) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, ScalarArray repeat_times)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : tile_grad
 # # forward backward type not match
 # - backward_api : top_k_grad
...
@@ -536,25 +539,25 @@
 #     func : top_k_grad
-# - backward_api : trace_grad
-#   forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
-#   args : (Tensor out_grad, Tensor x, int offset, int axis1, int axis2)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : trace_grad
+- backward_api : trace_grad
+  forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int offset, int axis1, int axis2)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : trace_grad
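The argument reorder (`x` before `out_grad`) matches the updated kernel signature. A sketch that checks the gradient, which scatters `out_grad` onto the traced diagonal:

import paddle

x = paddle.randn([3, 3])
x.stop_gradient = False
paddle.trace(x).backward()
print(x.grad.numpy())  # identity matrix: ones on the diagonal selected by trace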
-# - backward_api : unfold_grad
-#   forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
-#   args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : UnchangedInferMeta
-#     param : [x]
-#   kernel :
-#     func : unfold_grad
+- backward_api : unfold_grad
+  forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : unfold_grad
 # - backward_api : where_index_grad
 #   forward : where_index (Tensor condition) -> Tensor(out)
...
@@ -564,4 +567,4 @@
 #     func : UnchangedInferMeta
 #     param : [x]
 #   kernel :
-#     func : trace_grad
+#     func : where_index_grad
python/paddle/utils/code_gen/wrapped_infermeta_gen.py
View file @ 9cd5cd4e

...
@@ -43,9 +43,12 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
         'const std::vector<Tensor>&': 'const std::vector<MetaTensor>&',
         'Tensor': 'MetaTensor*',
         'std::vector<Tensor>': 'std::vector<MetaTensor>*',
+        'const paddle::optional<Tensor&>': 'const paddle::optional<MetaTensor&>'
     }

     wrapped_infermeta_name = get_wrapped_infermeta_name(api.api)
+    print(wrapped_infermeta_name)
     args = []
     for input_name in api.inputs['names']:
         if input_name in kernel_params:
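The added dict entry lets optional-Tensor parameters in a C++ API signature be rewritten to the MetaTensor form used by the wrapped infer-meta functions. Schematically (hypothetical `to_meta_type` helper; only the mapping entries visible in this hunk are real):

tensor_type_map = {
    'const std::vector<Tensor>&': 'const std::vector<MetaTensor>&',
    'Tensor': 'MetaTensor*',
    'std::vector<Tensor>': 'std::vector<MetaTensor>*',
    'const paddle::optional<Tensor&>': 'const paddle::optional<MetaTensor&>',
}

def to_meta_type(cpp_type):
    # non-Tensor argument types (int, float, ScalarArray, ...) pass through unchanged
    return tensor_type_map.get(cpp_type, cpp_type)

assert to_meta_type('const paddle::optional<Tensor&>') == 'const paddle::optional<MetaTensor&>'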
...
@@ -132,6 +135,7 @@ def generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path,
     for api in apis:
         api_item = ForwardAPI(api)
+        #print( str(api_item) )
         declare_code, defind_code, register_code = gene_wrapped_infermeta_and_register(
             api_item)
         header_file.write(declare_code)
...