Commit 4a338796 (unverified)
Author: Chen Weihang, Feb 22, 2022
Committer: GitHub, Feb 22, 2022
Parent: 73bf9673

[PTen->Phi PR2] Rename PT_REGISTER macro to PD_REGISTER (#39790)

* unify register macro
* rename declare macro
* fix infrt error
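The commit is a mechanical prefix rename across the phi (formerly PTen) registration macros. As a minimal before/after sketch, taken from the sign kernel change in the diff below (the dtype list shown is the one visible in that hunk):

// Before this commit: PTen-era macro prefix.
PT_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}

// After this commit: the same registration under the PD_ prefix.
PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}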
Showing 131 changed files with 354 additions and 352 deletions (+354 / -352).
cmake/pten.cmake  +11  -11
paddle/fluid/framework/infershape_utils_test.cc  +1  -1
paddle/phi/api/ext/op_kernel_info.h  +9  -8
paddle/phi/api/lib/api_declare.h  +3  -3
paddle/phi/api/lib/api_registry.h  +2  -2
paddle/phi/api/lib/manual_api.cc  +5  -5
paddle/phi/api/lib/op_kernel_info.cc  +1  -1
paddle/phi/api/lib/sparse_api.cc  +13  -13
paddle/phi/common/backend.h  +7  -7
paddle/phi/core/compat/op_utils.h  +12  -12
paddle/phi/core/infermeta_utils.h  +3  -3
paddle/phi/core/kernel_registry.h  +34  -33
paddle/phi/infermeta/unary.cc  +2  -2
paddle/phi/kernels/cpu/abs_grad_kernel.cc  +2  -2
paddle/phi/kernels/cpu/abs_kernel.cc  +1  -1
paddle/phi/kernels/cpu/bernoulli_kernel.cc  +1  -1
paddle/phi/kernels/cpu/cast_kernel.cc  +1  -1
paddle/phi/kernels/cpu/complex_kernel.cc  +1  -1
paddle/phi/kernels/cpu/concat_kernel.cc  +1  -1
paddle/phi/kernels/cpu/copy_kernel.cc  +1  -1
paddle/phi/kernels/cpu/diagonal_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/diagonal_kernel.cc  +1  -1
paddle/phi/kernels/cpu/digamma_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/digamma_kernel.cc  +1  -1
paddle/phi/kernels/cpu/dot_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/dot_kernel.cc  +1  -1
paddle/phi/kernels/cpu/elementwise_grad_kernel.cc  +5  -5
paddle/phi/kernels/cpu/expand_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/expand_kernel.cc  +1  -1
paddle/phi/kernels/cpu/full_kernel.cc  +2  -2
paddle/phi/kernels/cpu/histogram_kernel.cc  +1  -1
paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/huber_loss_kernel.cc  +1  -1
paddle/phi/kernels/cpu/lerp_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/lerp_kernel.cc  +1  -1
paddle/phi/kernels/cpu/masked_select_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/masked_select_kernel.cc  +1  -1
paddle/phi/kernels/cpu/math_kernel.cc  +6  -6
paddle/phi/kernels/cpu/matmul_grad_kernel.cc  +3  -3
paddle/phi/kernels/cpu/matmul_kernel.cc  +1  -1
paddle/phi/kernels/cpu/norm_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/norm_kernel.cc  +1  -1
paddle/phi/kernels/cpu/scale_kernel.cc  +1  -1
paddle/phi/kernels/cpu/sign_kernel.cc  +1  -1
paddle/phi/kernels/cpu/split_kernel.cc  +1  -1
paddle/phi/kernels/cpu/trace_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/trace_kernel.cc  +1  -1
paddle/phi/kernels/cpu/trunc_grad_kernel.cc  +1  -1
paddle/phi/kernels/cpu/trunc_kernel.cc  +1  -1
paddle/phi/kernels/empty_kernel.cc  +4  -4
paddle/phi/kernels/flatten_grad_kernel.cc  +3  -3
paddle/phi/kernels/flatten_kernel.cc  +6  -6
paddle/phi/kernels/gpu/abs_grad_kernel.cu  +2  -2
paddle/phi/kernels/gpu/abs_kernel.cu  +1  -1
paddle/phi/kernels/gpu/bernoulli_kernel.cu  +1  -1
paddle/phi/kernels/gpu/cast_kernel.cu  +1  -1
paddle/phi/kernels/gpu/complex_kernel.cu  +1  -1
paddle/phi/kernels/gpu/concat_kernel.cu  +1  -1
paddle/phi/kernels/gpu/copy_kernel.cu  +1  -1
paddle/phi/kernels/gpu/diagonal_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/diagonal_kernel.cu  +1  -1
paddle/phi/kernels/gpu/digamma_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/digamma_kernel.cu  +1  -1
paddle/phi/kernels/gpu/dot_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/dot_kernel.cu  +1  -1
paddle/phi/kernels/gpu/elementwise_grad_kernel.cu  +5  -5
paddle/phi/kernels/gpu/expand_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/expand_kernel.cu  +1  -1
paddle/phi/kernels/gpu/full_kernel.cu  +2  -2
paddle/phi/kernels/gpu/histogram_kernel.cu  +1  -1
paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/huber_loss_kernel.cu  +1  -1
paddle/phi/kernels/gpu/lerp_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/lerp_kernel.cu  +1  -1
paddle/phi/kernels/gpu/masked_select_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/masked_select_kernel.cu  +1  -1
paddle/phi/kernels/gpu/math_kernel.cu  +6  -6
paddle/phi/kernels/gpu/matmul_grad_kernel.cu  +3  -3
paddle/phi/kernels/gpu/matmul_kernel.cu  +1  -1
paddle/phi/kernels/gpu/norm_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/norm_kernel.cu  +1  -1
paddle/phi/kernels/gpu/scale_kernel.cu  +1  -1
paddle/phi/kernels/gpu/sign_kernel.cu.cc  +1  -1
paddle/phi/kernels/gpu/split_kernel.cu  +1  -1
paddle/phi/kernels/gpu/trace_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/trace_kernel.cu  +1  -1
paddle/phi/kernels/gpu/trunc_grad_kernel.cu  +1  -1
paddle/phi/kernels/gpu/trunc_kernel.cu  +1  -1
paddle/phi/kernels/math_kernel.cc  +12  -12
paddle/phi/kernels/reshape_grad_kernel.cc  +6  -6
paddle/phi/kernels/reshape_kernel.cc  +6  -6
paddle/phi/kernels/selected_rows/full_kernel.cc  +2  -2
paddle/phi/kernels/selected_rows/scale_kernel.cc  +2  -2
paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc  +6  -6
paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu  +6  -6
paddle/phi/kernels/transfer_layout_kernel.cc  +1  -1
paddle/phi/kernels/xpu/cast_kernel.cc  +1  -1
paddle/phi/kernels/xpu/copy_kernel.cc  +1  -1
paddle/phi/kernels/xpu/full_kernel.cc  +2  -2
paddle/phi/kernels/xpu/scale_kernel.cc  +1  -1
paddle/phi/ops/compat/abs_sig.cc  +3  -3
paddle/phi/ops/compat/cast_sig.cc  +1  -1
paddle/phi/ops/compat/concat_sig.cc  +1  -1
paddle/phi/ops/compat/diagonal_sig.cc  +1  -1
paddle/phi/ops/compat/digamma_sig.cc  +1  -1
paddle/phi/ops/compat/dot_sig.cc  +1  -1
paddle/phi/ops/compat/elementwise_sig.cc  +17  -17
paddle/phi/ops/compat/empty_sig.cc  +1  -1
paddle/phi/ops/compat/expand_sig.cc  +4  -4
paddle/phi/ops/compat/fill_any_like_sig.cc  +2  -2
paddle/phi/ops/compat/fill_constant_sig.cc  +2  -2
paddle/phi/ops/compat/flatten_sig.cc  +4  -4
paddle/phi/ops/compat/histogram_sig.cc  +1  -1
paddle/phi/ops/compat/huber_loss_sig.cc  +2  -2
paddle/phi/ops/compat/lerp_sig.cc  +2  -2
paddle/phi/ops/compat/masked_select_sig.cc  +2  -2
paddle/phi/ops/compat/matmul_sig.cc  +7  -7
paddle/phi/ops/compat/norm_sig.cc  +2  -2
paddle/phi/ops/compat/reduce_sig.cc  +4  -4
paddle/phi/ops/compat/reshape_sig.cc  +6  -6
paddle/phi/ops/compat/scale_sig.cc  +1  -1
paddle/phi/ops/compat/split_sig.cc  +1  -1
paddle/phi/ops/compat/trace_sig.cc  +2  -2
paddle/phi/ops/compat/trunc_sig.cc  +2  -2
paddle/phi/tests/core/test_custom_kernel.cc  +12  -12
paddle/phi/tests/core/test_kernel_factory.cc  +2  -2
paddle/phi/tests/kernels/test_flatten_dev_api.cc  +3  -3
python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc  +1  -1
python/paddle/utils/code_gen/api_gen.py  +1  -1
python/paddle/utils/code_gen/wrapped_infermeta_gen.py  +2  -2
tools/infrt/get_pten_kernel_function.sh  +3  -3
cmake/pten.cmake
@@ -58,26 +58,26 @@ endfunction()
 function(kernel_declare TARGET_LIST)
   foreach(kernel_path ${TARGET_LIST})
     file(READ ${kernel_path} kernel_impl)
-    # TODO(chenweihang): rename PT_REGISTER_KERNEL to PT_REGISTER_KERNEL
+    # TODO(chenweihang): rename PD_REGISTER_KERNEL to PD_REGISTER_KERNEL
     # NOTE(chenweihang): now we don't recommend to use digit in kernel name
-    string(REGEX MATCH "(PT_REGISTER_KERNEL|PT_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
+    string(REGEX MATCH "(PD_REGISTER_KERNEL|PD_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
     if (NOT first_registry STREQUAL "")
       # parse the first kernel name
-      string(REPLACE "PT_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
-      string(REPLACE "PT_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
+      string(REPLACE "PD_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
+      string(REPLACE "PD_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
       string(REPLACE "," "" kernel_name "${kernel_name}")
       string(REGEX REPLACE "[ \t\r\n]+" "" kernel_name "${kernel_name}")
       # append kernel declare into declarations.h
       # TODO(chenweihang): default declare ALL_LAYOUT for each kernel
       if (${kernel_path} MATCHES "./cpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
       elseif (${kernel_path} MATCHES "./gpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
       elseif (${kernel_path} MATCHES "./xpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
       else ()
         # deal with device independent kernel, now we use CPU temporaary
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
       endif()
     endif()
   endforeach()
...
@@ -285,9 +285,9 @@ endfunction()
 function(append_op_util_declare TARGET)
   file(READ ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET} target_content)
-  string(REGEX MATCH "(PT_REGISTER_BASE_KERNEL_NAME|PT_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
-  string(REPLACE "PT_REGISTER_ARG_MAPPING_FN" "PT_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
-  string(REPLACE "PT_REGISTER_BASE_KERNEL_NAME" "PT_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
+  string(REGEX MATCH "(PD_REGISTER_BASE_KERNEL_NAME|PD_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
+  string(REPLACE "PD_REGISTER_ARG_MAPPING_FN" "PD_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
+  string(REPLACE "PD_REGISTER_BASE_KERNEL_NAME" "PD_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
   string(APPEND util_declare ");\n")
   file(APPEND ${op_utils_header} "${util_declare}")
 endfunction()
...
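The net effect of the kernel_declare() change is that the generated declarations header is now filled with PD_-prefixed declarations instead of PT_-prefixed ones. A sketch of the generated lines, assuming abs (CPU/GPU) and cast (XPU) kernels are among the scanned sources — the concrete kernel names depend on the build, not on this diff:

// Illustrative lines appended to the generated declarations header by
// kernel_declare(); the kernel names here are assumptions for illustration.
PD_DECLARE_KERNEL(abs, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(abs, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(cast, XPU, ALL_LAYOUT);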
paddle/fluid/framework/infershape_utils_test.cc
@@ -118,7 +118,7 @@ REGISTER_OPERATOR(infer_shape_utils_test,
                   paddle::framework::InferShapeUtilsTestOpMaker,
                   InferShapeUtilsTestInferShapeFunctor);
-PT_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
+PD_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
                    paddle::framework::InferShapeUtilsTestKernel, int) {}
 TEST(InferShapeUtilsTest, ALL) {
...
paddle/phi/api/ext/op_kernel_info.h
@@ -630,16 +630,16 @@ class PADDLE_API OpKernelInfoBuilder {
 };
 /////////////////////// Custom kernel register API /////////////////////////
 // For inference: compile directly with framework
-// Call after PD_REGISTER_KERNEL(...)
+// Call after PD_REGISTER_BUILTIN_KERNEL(...)
 void RegisterAllCustomKernel();
 //////////////// Custom kernel register macro /////////////////////
 // Refer to paddle/phi/core/kernel_registry.h, we can not use
-// PT_REGISTER_KERNEL directly, common macros and functions are
+// PD_REGISTER_KERNEL directly, common macros and functions are
 // not ready for custom kernel now.
 // Difference: custom_kernel stores all kernels' info into global
 // g_custom_kernel_info_map before loading and registering into
-// pten kernel management. Only providing PD_REGISTER_KERNEL which
+// pten kernel management. Only providing PD_REGISTER_BUILTIN_KERNEL which
 // supports 2 template arguments.
 #define PD_BACKEND(arg__) phi::Backend::arg__
...
@@ -666,11 +666,12 @@ void RegisterAllCustomKernel();
 #define PD_ID __LINE__
 #endif
-#define PD_REGISTER_KERNEL(kernel_name, backend, layout, func, cpp_dtype, ...) \
-  STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      _reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PD_REGISTER_KERNEL must be called in global namespace."); \
-  _PD_REGISTER_2TA_KERNEL( \
+#define PD_REGISTER_BUILTIN_KERNEL( \
+    kernel_name, backend, layout, func, cpp_dtype, ...) \
+  STATIC_ASSERT_GLOBAL_NAMESPACE( \
+      _reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_BUILTIN_KERNEL must be called in global namespace."); \
+  _PD_REGISTER_2TA_KERNEL( \
       kernel_name, backend, layout, func, cpp_dtype, ##__VA_ARGS__)
 // WIN32 is not supported
...
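For custom kernels, the externally facing macro is therefore PD_REGISTER_BUILTIN_KERNEL after this change. A hedged usage sketch follows; the kernel function, its argument types, and the dtype are illustrative placeholders and are not taken from this diff:

// Hypothetical custom kernel; names and signature are assumptions for illustration.
namespace custom_kernel {
template <typename T>
void DotKernel(const paddle::CPUContext& dev_ctx,
               const paddle::Tensor& x,
               const paddle::Tensor& y,
               paddle::Tensor* out);
}  // namespace custom_kernel

// Register the kernel through the renamed custom-kernel entry point.
PD_REGISTER_BUILTIN_KERNEL(dot, CPU, ALL_LAYOUT, custom_kernel::DotKernel, float) {}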
paddle/phi/api/lib/api_declare.h
@@ -17,6 +17,6 @@ limitations under the License. */
 // api symbols declare, remove in the future
 #include "paddle/phi/api/lib/api_registry.h"
-PT_DECLARE_API(Math);
-PT_DECLARE_API(Utils);
-PT_DECLARE_API(SparseApi);
+PD_DECLARE_API(Math);
+PD_DECLARE_API(Utils);
+PD_DECLARE_API(SparseApi);

paddle/phi/api/lib/api_registry.h
@@ -36,10 +36,10 @@ namespace experimental {
 */
 // use to declare symbol
-#define PT_REGISTER_API(name) \
+#define PD_REGISTER_API(name) \
   PADDLE_API int RegisterSymbolsFor##name() { return 0; }
-#define PT_DECLARE_API(name) \
+#define PD_DECLARE_API(name) \
   extern PADDLE_API int RegisterSymbolsFor##name(); \
   UNUSED static int use_pten_api_##name = RegisterSymbolsFor##name()
...
paddle/phi/api/lib/manual_api.cc
@@ -27,15 +27,15 @@ limitations under the License. */
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/infermeta/unary.h"
-PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
 #endif
 namespace paddle {
...
@@ -147,4 +147,4 @@ PADDLE_API std::vector<Tensor> split(const Tensor& x,
 }  // namespace experimental
 }  // namespace paddle
-PT_REGISTER_API(Utils);
+PD_REGISTER_API(Utils);

paddle/phi/api/lib/op_kernel_info.cc
@@ -86,7 +86,7 @@ OpKernelInfoBuilder& OpKernelInfoBuilder::ArgsDef(CustomKernelArgsDefFn func) {
 /////////////////////// Op register API /////////////////////////
 // For inference: compile directly with framework
-// Call after PD_REGISTER_KERNEL(...)
+// Call after PD_REGISTER_BUILTIN_KERNEL(...)
 void RegisterAllCustomKernel() {
   auto& op_kernel_info_map = OpKernelInfoMap::Instance();
   framework::RegisterKernelWithMetaInfoMap(op_kernel_info_map);
...
paddle/phi/api/lib/sparse_api.cc
@@ -22,20 +22,20 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/infermeta/unary.h"
-PT_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
 #endif
 namespace paddle {
...
@@ -228,4 +228,4 @@ PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) {
 }  // namespace experimental
 }  // namespace paddle
-PT_REGISTER_API(SparseApi);
+PD_REGISTER_API(SparseApi);
paddle/phi/common/backend.h
@@ -71,17 +71,17 @@ enum class Backend : uint8_t {
  * Of course, we have also considered solving this problem through different
  * named macros, for example, if we define
  *
- * PT_REGISTER_KERNEL_FOR_ALL_BACKEND
+ * PD_REGISTER_KERNEL_FOR_ALL_BACKEND
  *
  * Based on this design pattern, the dtype and layout also have the same
  * requirements, this cause we need to define a series of macros
  *
- * PT_REGISTER_KERNEL_FOR_ALL_DTYPE
- * PT_REGISTER_KERNEL_FOR_ALL_LAYOUT
- * PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT
- * PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_DTYPE
- * PT_REGISTER_KERNEL_FOR_ALL_LAYOUT_AND_DTYPE
- * PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT_AND_DTYPE
+ * PD_REGISTER_KERNEL_FOR_ALL_DTYPE
+ * PD_REGISTER_KERNEL_FOR_ALL_LAYOUT
+ * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT
+ * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_DTYPE
+ * PD_REGISTER_KERNEL_FOR_ALL_LAYOUT_AND_DTYPE
+ * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT_AND_DTYPE
  *
  * It makes the system of registering macros more complicated, we think
  * this is not a simple design, so we still adopt the design of providing
...
paddle/phi/core/compat/op_utils.h
@@ -164,34 +164,34 @@ struct ArgumentMappingFnRegistrar {
   }
 };
-#define PT_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name) \
+#define PD_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_base_kernel_name_ns_check_##op_type, \
-      "PT_REGISTER_BASE_KERNEL_NAME must be called in global namespace."); \
+      PD_REGISTER_base_kernel_name_ns_check_##op_type, \
+      "PD_REGISTER_BASE_KERNEL_NAME must be called in global namespace."); \
   static const ::phi::BaseKernelNameRegistrar \
       __registrar_base_kernel_name_for_##op_type(#op_type, #base_kernel_name); \
   int TouchBaseKernelNameSymbol_##op_type() { return 0; }
-#define PT_DECLARE_BASE_KERNEL_NAME(op_type) \
+#define PD_DECLARE_BASE_KERNEL_NAME(op_type) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_ai_name_ns_check_##op_type, \
-      "PT_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
+      PD_DECLARE_ai_name_ns_check_##op_type, \
+      "PD_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
   extern int TouchBaseKernelNameSymbol_##op_type(); \
   UNUSED static int __declare_base_kernel_name_symbol_for_##op_type = \
       TouchBaseKernelNameSymbol_##op_type()
-#define PT_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn) \
+#define PD_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_arg_map_fn_ns_check_##op_type, \
-      "PT_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
+      PD_REGISTER_arg_map_fn_ns_check_##op_type, \
+      "PD_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
   static const ::phi::ArgumentMappingFnRegistrar \
       __registrar_arg_map_fn_for_##op_type(#op_type, arg_mapping_fn); \
   int TouchArgumentMappingFnSymbol_##op_type() { return 0; }
-#define PT_DECLARE_ARG_MAPPING_FN(op_type) \
+#define PD_DECLARE_ARG_MAPPING_FN(op_type) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_arg_map_fn_ns_check_##op_type, \
-      "PT_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
+      PD_DECLARE_arg_map_fn_ns_check_##op_type, \
+      "PD_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
   extern int TouchArgumentMappingFnSymbol_##op_type(); \
   UNUSED static int __declare_arg_map_fn_symbol_for_##op_type = \
       TouchArgumentMappingFnSymbol_##op_type()
...
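These macros are consumed by the op compat "sig" files (the *_sig.cc entries in the file list above). A minimal sketch of their post-rename usage, assuming a hypothetical my_op with one input and one output — the op name and mapping function here are illustrative, not taken from this diff:

// Hypothetical argument-mapping function; all names are placeholders.
namespace phi {
KernelSignature MyOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature("my_op", {"X"}, {}, {"Out"});
}
}  // namespace phi

// Map the legacy operator name to the phi kernel name, then register the mapping.
PD_REGISTER_BASE_KERNEL_NAME(my_op_v2, my_op);
PD_REGISTER_ARG_MAPPING_FN(my_op_v2, phi::MyOpArgumentMapping);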
paddle/phi/core/infermeta_utils.h
@@ -282,10 +282,10 @@ struct InferMetaFnRegistrar {
   }
 };
-#define PT_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
+#define PD_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_infer_meta_fn_ns_check_##kernel_name_prefix, \
-      "PT_REGISTER_INFER_META_FN must be called in global namespace."); \
+      PD_REGISTER_infer_meta_fn_ns_check_##kernel_name_prefix, \
+      "PD_REGISTER_INFER_META_FN must be called in global namespace."); \
   static const ::phi::InferMetaFnRegistrar \
       __registrar_arg_map_fn_for_##kernel_name_prefix( \
           #kernel_name_prefix, PT_INFER_META(variadic_infer_meta_fn))
...
paddle/phi/core/kernel_registry.h
@@ -234,7 +234,7 @@ struct KernelRegistrar {
 #define _PT_ARG_N(args) _PT_ARG_N_EXPAND args
 #define _PT_RESQ_N() 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-/** PT_REGISTER_KERNEL
+/** PD_REGISTER_KERNEL
  *
  * The most frequently used kernel registration macro, used for kernel
  * registration with only data type as template parameter, and the function
...
@@ -243,8 +243,8 @@ struct KernelRegistrar {
  *
  * Note: `2TA` means `2 template argument`
  */
-#define PT_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
+#define PD_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
+  _PD_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
                       kernel_name, \
                       backend, \
                       ::phi::backend##Context, \
...
@@ -252,12 +252,12 @@ struct KernelRegistrar {
                       meta_kernel_fn, \
                       __VA_ARGS__)
-#define _PT_REGISTER_KERNEL( \
+#define _PD_REGISTER_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_REGISTER_KERNEL must be called in global namespace."); \
-  PT_EXPAND(_PT_REGISTER_2TA_KERNEL(reg_type, \
+      PD_REGISTER_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_KERNEL must be called in global namespace."); \
+  PT_EXPAND(_PD_REGISTER_2TA_KERNEL(reg_type, \
                                     kernel_name, \
                                     backend, \
                                     context, \
...
@@ -266,7 +266,7 @@ struct KernelRegistrar {
                                     __VA_ARGS__))
 #ifndef _WIN32
-#define _PT_REGISTER_2TA_KERNEL( \
+#define _PD_REGISTER_2TA_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   PT_KERNEL_INSTANTIATION(meta_kernel_fn, backend, context, __VA_ARGS__); \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
...
@@ -295,7 +295,7 @@ struct KernelRegistrar {
  *
  * And msvc can work without template instantiation
  */
-#define _PT_REGISTER_2TA_KERNEL( \
+#define _PD_REGISTER_2TA_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
...
@@ -909,27 +909,27 @@ struct KernelRegistrar {
                     args_def_fn, \
                     meta_kernel_fn, \
                     __VA_ARGS__))
-/** PT_REGISTER_GENERAL_KERNEL
+/** PD_REGISTER_GENERAL_KERNEL
  *
  * Basic Kernel register marco, used to register a instantiated kernel function
  * with one template argument.
  */
-#define PT_REGISTER_GENERAL_KERNEL( \
+#define PD_REGISTER_GENERAL_KERNEL( \
     kernel_name, backend, layout, kernel_fn, dtype) \
-  _PT_REGISTER_GENERAL_KERNEL( \
+  _PD_REGISTER_GENERAL_KERNEL( \
       ::phi::RegType::BUILTIN, kernel_name, backend, layout, kernel_fn, dtype)
-#define _PT_REGISTER_GENERAL_KERNEL( \
+#define _PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_no_t_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_REGISTER_NO_TEMPLATE_KERNEL must be called in global namespace."); \
-  __PT_REGISTER_GENERAL_KERNEL( \
+      PD_REGISTER_no_t_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_NO_TEMPLATE_KERNEL must be called in global namespace."); \
+  __PD_REGISTER_GENERAL_KERNEL( \
       reg_type, kernel_name, backend, layout, kernel_fn, dtype)
 #ifndef _WIN32
-#define __PT_REGISTER_GENERAL_KERNEL( \
+#define __PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   template decltype(kernel_fn) kernel_fn; \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
...
@@ -950,7 +950,7 @@ struct KernelRegistrar {
   void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
 #else
-#define __PT_REGISTER_GENERAL_KERNEL( \
+#define __PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
...
@@ -971,42 +971,43 @@ struct KernelRegistrar {
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
 #endif
-/** PT_DECLARE_KERNEL
+/** PD_DECLARE_KERNEL
  *
  * Used to export the symbols of the file where the kernel is located,
  * to avoid being removed by linker
  */
-#define PT_DECLARE_KERNEL(kernel_name, backend, layout) \
+#define PD_DECLARE_KERNEL(kernel_name, backend, layout) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_DECLARE_KERNEL must be called in global namespace."); \
+      PD_DECLARE_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_DECLARE_KERNEL must be called in global namespace."); \
   extern int TouchKernelSymbolFor_##kernel_name##_##backend##_##layout(); \
   UNUSED static int \
       __declare_kernel_symbol_for_##kernel_name##_##backend##_##layout = \
          TouchKernelSymbolFor_##kernel_name##_##backend##_##layout()
-/** PD_REGISTER_KERNEL
+/** PD_REGISTER_BUILTIN_KERNEL
  *
  * Used to register kernels for built-in backends.
  * Support CPU GPU XPU.
  */
-#define PD_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
-                      kernel_name, \
-                      backend, \
-                      ::phi::backend##Context, \
-                      layout, \
-                      meta_kernel_fn, \
+#define PD_REGISTER_BUILTIN_KERNEL( \
+    kernel_name, backend, layout, meta_kernel_fn, ...) \
+  _PD_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
+                      kernel_name, \
+                      backend, \
+                      ::phi::backend##Context, \
+                      layout, \
+                      meta_kernel_fn, \
                       __VA_ARGS__)
-/** PD_REGISTER_CUSTOM_KERNEL
+/** PD_REGISTER_PLUGIN_KERNEL
  *
  * Used to register kernels for plug-in backends.
  * Support user-defined backend such as 'Ascend910'.
  */
-#define PD_REGISTER_CUSTOM_KERNEL( \
+#define PD_REGISTER_PLUGIN_KERNEL( \
     kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
+  _PD_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
                       kernel_name, \
                       backend, \
                       ::phi::CustomContext, \
...
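In-tree kernels therefore keep using PD_REGISTER_KERNEL (backed by RegType::BUILTIN), while out-of-tree plug-in backends move to PD_REGISTER_PLUGIN_KERNEL. A minimal sketch of the in-tree form, modeled on the sign kernel from this diff (the kernel body is omitted; the declaration shape follows the usual phi kernel signature):

namespace phi {
// Usual phi kernel shape: dtype and device context as template parameters.
template <typename T, typename Context>
void SignKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);
}  // namespace phi

// Registration with the renamed macro, as it appears after this commit.
PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}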
paddle/phi/infermeta/unary.cc
@@ -539,5 +539,5 @@ void TraceInferMeta(
 }  // namespace phi
-PT_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
-PT_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);
+PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
+PD_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);
paddle/phi/kernels/cpu/abs_grad_kernel.cc
@@ -19,7 +19,7 @@
 using phi::dtype::complex;
-PT_REGISTER_KERNEL(abs_grad,
+PD_REGISTER_KERNEL(abs_grad,
                    CPU, ALL_LAYOUT, phi::AbsGradKernel, ...
@@ -29,7 +29,7 @@ PT_REGISTER_KERNEL(abs_grad,
                    int64_t, complex<float>, complex<double>) {}
-PT_REGISTER_KERNEL(abs_double_grad,
+PD_REGISTER_KERNEL(abs_double_grad,
                    CPU, ALL_LAYOUT, phi::AbsDoubleGradKernel, ...

paddle/phi/kernels/cpu/abs_kernel.cc
@@ -36,7 +36,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
 }  // namespace phi
-PT_REGISTER_KERNEL(abs,
+PD_REGISTER_KERNEL(abs,
                    CPU, ALL_LAYOUT, phi::AbsKernel, ...

paddle/phi/kernels/cpu/bernoulli_kernel.cc
@@ -51,5 +51,5 @@ void BernoulliKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     bernoulli, CPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}

paddle/phi/kernels/cpu/cast_kernel.cc
@@ -58,7 +58,7 @@ void CastKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(cast,
+PD_REGISTER_KERNEL(cast,
                    CPU, ALL_LAYOUT, phi::CastKernel, ...

paddle/phi/kernels/cpu/complex_kernel.cc
@@ -21,7 +21,7 @@
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/complex.h"
-PT_REGISTER_KERNEL(conj,
+PD_REGISTER_KERNEL(conj,
                    CPU, ALL_LAYOUT, phi::ConjKernel, ...

paddle/phi/kernels/cpu/concat_kernel.cc
@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(concat,
+PD_REGISTER_KERNEL(concat,
                    CPU, ALL_LAYOUT, phi::ConcatKernel, ...

paddle/phi/kernels/cpu/copy_kernel.cc
@@ -56,5 +56,5 @@ void Copy(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     copy, CPU, ALL_LAYOUT, phi::Copy<phi::CPUContext>, ALL_DTYPE) {}

paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
@@ -82,7 +82,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(diagonal_grad,
+PD_REGISTER_KERNEL(diagonal_grad,
                    CPU, ALL_LAYOUT, phi::DiagonalGradKernel, ...

paddle/phi/kernels/cpu/diagonal_kernel.cc
@@ -79,7 +79,7 @@ void DiagonalKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(diagonal,
+PD_REGISTER_KERNEL(diagonal,
                    CPU, ALL_LAYOUT, phi::DiagonalKernel, ...

paddle/phi/kernels/cpu/digamma_grad_kernel.cc
@@ -19,5 +19,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma_grad, CPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}

paddle/phi/kernels/cpu/digamma_kernel.cc
@@ -19,5 +19,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma, CPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}

paddle/phi/kernels/cpu/dot_grad_kernel.cc
@@ -20,7 +20,7 @@
 #include "paddle/phi/common/complex.h"
-PT_REGISTER_KERNEL(dot_grad,
+PD_REGISTER_KERNEL(dot_grad,
                    CPU, ALL_LAYOUT, phi::DotGradKernel, ...

paddle/phi/kernels/cpu/dot_kernel.cc
@@ -49,7 +49,7 @@ void DotKernel(const Context& dev_ctx,
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
-PT_REGISTER_KERNEL(dot,
+PD_REGISTER_KERNEL(dot,
                    CPU, ALL_LAYOUT, phi::DotKernel, ...
paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -125,7 +125,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(add_grad,
+PD_REGISTER_KERNEL(add_grad,
                    CPU, ALL_LAYOUT, phi::AddGradKernel, ...
@@ -137,7 +137,7 @@ PT_REGISTER_KERNEL(add_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(add_double_grad,
+PD_REGISTER_KERNEL(add_double_grad,
                    CPU, ALL_LAYOUT, phi::AddDoubleGradKernel, ...
@@ -149,7 +149,7 @@ PT_REGISTER_KERNEL(add_double_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(add_triple_grad,
+PD_REGISTER_KERNEL(add_triple_grad,
                    CPU, ALL_LAYOUT, phi::AddTripleGradKernel, ...
@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(subtract_grad,
+PD_REGISTER_KERNEL(subtract_grad,
                    CPU, ALL_LAYOUT, phi::SubtractGradKernel, ...
@@ -173,7 +173,7 @@ PT_REGISTER_KERNEL(subtract_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(subtract_double_grad,
+PD_REGISTER_KERNEL(subtract_double_grad,
                    CPU, ALL_LAYOUT, phi::SubtractDoubleGradKernel, ...

paddle/phi/kernels/cpu/expand_grad_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(expand_grad,
+PD_REGISTER_KERNEL(expand_grad,
                    CPU, ALL_LAYOUT, phi::ExpandGradKernel, ...

paddle/phi/kernels/cpu/expand_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/expand_kernel_impl.h"
-PT_REGISTER_KERNEL(expand,
+PD_REGISTER_KERNEL(expand,
                    CPU, ALL_LAYOUT, phi::ExpandKernel, ...

paddle/phi/kernels/cpu/full_kernel.cc
@@ -73,7 +73,7 @@ void FullLikeKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(full,
+PD_REGISTER_KERNEL(full,
                    CPU, ALL_LAYOUT, phi::FullKernel, ...
@@ -89,7 +89,7 @@ PT_REGISTER_KERNEL(full,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(full_like,
+PD_REGISTER_KERNEL(full_like,
                    CPU, ALL_LAYOUT, phi::FullLikeKernel, ...

paddle/phi/kernels/cpu/histogram_kernel.cc
@@ -77,7 +77,7 @@ void HistogramKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(histogram,
+PD_REGISTER_KERNEL(histogram,
                    CPU, ALL_LAYOUT, phi::HistogramKernel, ...

paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc
@@ -17,6 +17,6 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss_grad, CPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
 }

paddle/phi/kernels/cpu/huber_loss_kernel.cc
@@ -17,5 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss, CPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}

paddle/phi/kernels/cpu/lerp_grad_kernel.cc
@@ -17,5 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     lerp_grad, CPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}

paddle/phi/kernels/cpu/lerp_kernel.cc
@@ -17,4 +17,4 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
-PT_REGISTER_KERNEL(lerp, CPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
+PD_REGISTER_KERNEL(lerp, CPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}

paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -43,7 +43,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(masked_select_grad,
+PD_REGISTER_KERNEL(masked_select_grad,
                    CPU, ALL_LAYOUT, phi::MaskedSelectGradKernel, ...

paddle/phi/kernels/cpu/masked_select_kernel.cc
@@ -61,7 +61,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(masked_select,
+PD_REGISTER_KERNEL(masked_select,
                    CPU, ALL_LAYOUT, phi::MaskedSelectKernel, ...
paddle/phi/kernels/cpu/math_kernel.cc
@@ -118,7 +118,7 @@ using complex128 = ::phi::dtype::complex<double>;
 // NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
 // using bfloat16 = ::phi::dtype::bfloat16;
-PT_REGISTER_KERNEL(add_raw,
+PD_REGISTER_KERNEL(add_raw,
                    CPU, ALL_LAYOUT, phi::AddRawKernel, ...
@@ -129,7 +129,7 @@ PT_REGISTER_KERNEL(add_raw,
                    int64_t, complex64, complex128) {}
-PT_REGISTER_KERNEL(subtract_raw,
+PD_REGISTER_KERNEL(subtract_raw,
                    CPU, ALL_LAYOUT, phi::SubtractRawKernel, ...
@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL(subtract_raw,
                    int64_t, complex64, complex128) {}
-PT_REGISTER_KERNEL(divide_raw,
+PD_REGISTER_KERNEL(divide_raw,
                    CPU, ALL_LAYOUT, phi::DivideRawKernel, ...
@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL(divide_raw,
                    int64_t, complex64, complex128) {}
-PT_REGISTER_KERNEL(multiply_raw,
+PD_REGISTER_KERNEL(multiply_raw,
                    CPU, ALL_LAYOUT, phi::MultiplyRawKernel, ...
@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(multiply_raw,
                    bool, complex64, complex128) {}
-PT_REGISTER_KERNEL(sum_raw,
+PD_REGISTER_KERNEL(sum_raw,
                    CPU, ALL_LAYOUT, phi::SumRawKernel, ...
@@ -176,5 +176,5 @@ PT_REGISTER_KERNEL(sum_raw,
                    complex128) {
   kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     mean_raw, CPU, ALL_LAYOUT, phi::MeanRawKernel, float, double, bool) {}

paddle/phi/kernels/cpu/matmul_grad_kernel.cc
@@ -19,7 +19,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(matmul_grad,
+PD_REGISTER_KERNEL(matmul_grad,
                    CPU, ALL_LAYOUT, phi::MatmulGradKernel, ...
@@ -28,7 +28,7 @@ PT_REGISTER_KERNEL(matmul_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(matmul_double_grad,
+PD_REGISTER_KERNEL(matmul_double_grad,
                    CPU, ALL_LAYOUT, phi::MatmulDoubleGradKernel, ...
@@ -37,7 +37,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(matmul_triple_grad,
+PD_REGISTER_KERNEL(matmul_triple_grad,
                    CPU, ALL_LAYOUT, phi::MatmulTripleGradKernel, ...

paddle/phi/kernels/cpu/matmul_kernel.cc
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
-PT_REGISTER_KERNEL(matmul,
+PD_REGISTER_KERNEL(matmul,
                    CPU, ALL_LAYOUT, phi::MatmulKernel, ...

paddle/phi/kernels/cpu/norm_grad_kernel.cc
@@ -83,5 +83,5 @@ void NormGradKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     norm_grad, CPU, ALL_LAYOUT, phi::NormGradKernel, float, double) {}

paddle/phi/kernels/cpu/norm_kernel.cc
@@ -76,4 +76,4 @@ void NormKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(norm, CPU, ALL_LAYOUT, phi::NormKernel, float, double) {}
+PD_REGISTER_KERNEL(norm, CPU, ALL_LAYOUT, phi::NormKernel, float, double) {}

paddle/phi/kernels/cpu/scale_kernel.cc
@@ -51,7 +51,7 @@ void ScaleKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(scale,
+PD_REGISTER_KERNEL(scale,
                    CPU, ALL_LAYOUT, phi::ScaleKernel, ...

paddle/phi/kernels/cpu/sign_kernel.cc
@@ -21,4 +21,4 @@ limitations under the License. */
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/bfloat16.h"
-PT_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}
+PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}

paddle/phi/kernels/cpu/split_kernel.cc
@@ -60,7 +60,7 @@ void SplitKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(split,
+PD_REGISTER_KERNEL(split,
                    CPU, ALL_LAYOUT, phi::SplitKernel, ...

paddle/phi/kernels/cpu/trace_grad_kernel.cc
@@ -18,7 +18,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(trace_grad,
+PD_REGISTER_KERNEL(trace_grad,
                    CPU, ALL_LAYOUT, phi::TraceGradKernel, ...

paddle/phi/kernels/cpu/trace_kernel.cc
@@ -45,7 +45,7 @@ void TraceKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(trace,
+PD_REGISTER_KERNEL(trace,
                    CPU, ALL_LAYOUT, phi::TraceKernel, ...

paddle/phi/kernels/cpu/trunc_grad_kernel.cc
@@ -30,7 +30,7 @@ void TruncGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(trunc_grad,
+PD_REGISTER_KERNEL(trunc_grad,
                    CPU, ALL_LAYOUT, phi::TruncGradKernel, ...

paddle/phi/kernels/cpu/trunc_kernel.cc
@@ -35,5 +35,5 @@ void TruncKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     trunc, CPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
paddle/phi/kernels/empty_kernel.cc
@@ -38,7 +38,7 @@ void EmptyLikeKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(empty,
+PD_REGISTER_KERNEL(empty,
                    CPU, ALL_LAYOUT, phi::EmptyKernel, ...
@@ -54,7 +54,7 @@ PT_REGISTER_KERNEL(empty,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(empty_like,
+PD_REGISTER_KERNEL(empty_like,
                    CPU, ALL_LAYOUT, phi::EmptyLikeKernel, ...
@@ -71,7 +71,7 @@ PT_REGISTER_KERNEL(empty_like,
                    phi::dtype::complex<double>) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(empty,
+PD_REGISTER_KERNEL(empty,
                    GPU, ALL_LAYOUT, phi::EmptyKernel, ...
@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(empty,
                    phi::dtype::complex<float>, phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(empty_like,
+PD_REGISTER_KERNEL(empty_like,
                    GPU, ALL_LAYOUT, phi::EmptyLikeKernel, ...

paddle/phi/kernels/flatten_grad_kernel.cc
@@ -32,7 +32,7 @@ void FlattenGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    CPU, ALL_LAYOUT, phi::FlattenGradKernel, ...
@@ -44,7 +44,7 @@ PT_REGISTER_KERNEL(flatten_grad,
                    int64_t) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    GPU, ALL_LAYOUT, phi::FlattenGradKernel, ...
@@ -59,7 +59,7 @@ PT_REGISTER_KERNEL(flatten_grad,
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    XPU, ALL_LAYOUT, phi::FlattenGradKernel, ...

paddle/phi/kernels/flatten_kernel.cc
@@ -48,7 +48,7 @@ void FlattenWithXShape(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    CPU, ALL_LAYOUT, phi::FlattenKernel, ...
@@ -60,7 +60,7 @@ PT_REGISTER_KERNEL(flatten,
                    int, int64_t) {}
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    CPU, ALL_LAYOUT, phi::FlattenWithXShape, ...
@@ -73,7 +73,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
                    int64_t) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    GPU, ALL_LAYOUT, phi::FlattenKernel, ...
@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(flatten,
                    int, int64_t) {}
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    GPU, ALL_LAYOUT, phi::FlattenWithXShape, ...
@@ -101,7 +101,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    XPU, ALL_LAYOUT, phi::FlattenKernel, ...
@@ -112,7 +112,7 @@ PT_REGISTER_KERNEL(flatten,
                    int, int64_t) {}
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    XPU, ALL_LAYOUT, phi::FlattenWithXShape, ...
paddle/phi/kernels/gpu/abs_grad_kernel.cu
@@ -20,7 +20,7 @@
 using phi::dtype::complex;
-PT_REGISTER_KERNEL(abs_grad,
+PD_REGISTER_KERNEL(abs_grad,
                    GPU, ALL_LAYOUT, phi::AbsGradKernel, ...
@@ -31,7 +31,7 @@ PT_REGISTER_KERNEL(abs_grad,
                    phi::dtype::float16, complex<float>, complex<double>) {}
-PT_REGISTER_KERNEL(abs_double_grad,
+PD_REGISTER_KERNEL(abs_double_grad,
                    GPU, ALL_LAYOUT, phi::AbsDoubleGradKernel, ...

paddle/phi/kernels/gpu/abs_kernel.cu
@@ -52,7 +52,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
 }  // namespace phi
-PT_REGISTER_KERNEL(abs,
+PD_REGISTER_KERNEL(abs,
                    GPU, ALL_LAYOUT, phi::AbsKernel, ...

paddle/phi/kernels/gpu/bernoulli_kernel.cu
@@ -73,5 +73,5 @@ void BernoulliKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     bernoulli, GPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}

paddle/phi/kernels/gpu/cast_kernel.cu
@@ -61,7 +61,7 @@ void CastKernel(const Context& dev_ctx,
 }  // namespace phi
 #define PTEN_REGISTER_CAST_CUDA_BASE_TYPE(op_name, ...) \
-  PT_REGISTER_KERNEL(cast,                              \
+  PD_REGISTER_KERNEL(cast,                              \
                      GPU,                               \
                      ALL_LAYOUT,                        \
                      phi::CastKernel,                   \
...

paddle/phi/kernels/gpu/complex_kernel.cu
@@ -21,7 +21,7 @@
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/complex.h"
-PT_REGISTER_KERNEL(conj,
+PD_REGISTER_KERNEL(conj,
                    GPU, ALL_LAYOUT, phi::ConjKernel, ...

paddle/phi/kernels/gpu/concat_kernel.cu
@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(concat,
+PD_REGISTER_KERNEL(concat,
                    GPU, ALL_LAYOUT, phi::ConcatKernel, ...

paddle/phi/kernels/gpu/copy_kernel.cu
@@ -207,5 +207,5 @@ void Copy(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     copy, GPU, ALL_LAYOUT, phi::Copy<phi::GPUContext>, ALL_DTYPE) {}

paddle/phi/kernels/gpu/diagonal_grad_kernel.cu
@@ -158,7 +158,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(diagonal_grad,
+PD_REGISTER_KERNEL(diagonal_grad,
                    GPU, ALL_LAYOUT, phi::DiagonalGradKernel, ...

paddle/phi/kernels/gpu/diagonal_kernel.cu
@@ -154,7 +154,7 @@ void DiagonalKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(diagonal,
+PD_REGISTER_KERNEL(diagonal,
                    GPU, ALL_LAYOUT, phi::DiagonalKernel, ...

paddle/phi/kernels/gpu/digamma_grad_kernel.cu
@@ -18,5 +18,5 @@
 #include "paddle/phi/kernels/digamma_grad_kernel.h"
 #include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma_grad, GPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}

paddle/phi/kernels/gpu/digamma_kernel.cu
@@ -19,5 +19,5 @@
 #include "paddle/phi/kernels/digamma_kernel.h"
 #include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma, GPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}

paddle/phi/kernels/gpu/dot_grad_kernel.cu
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/phi/common/complex.h"
-PT_REGISTER_KERNEL(dot_grad,
+PD_REGISTER_KERNEL(dot_grad,
                    GPU, ALL_LAYOUT, phi::DotGradKernel, ...

paddle/phi/kernels/gpu/dot_kernel.cu
@@ -52,7 +52,7 @@ void DotKernel(const Context& dev_ctx,
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
-PT_REGISTER_KERNEL(dot,
+PD_REGISTER_KERNEL(dot,
                    GPU, ALL_LAYOUT, phi::DotKernel, ...
paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
浏览文件 @
4a338796
...
...
@@ -119,7 +119,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
}
// namespace phi
P
T
_REGISTER_KERNEL
(
add_grad
,
P
D
_REGISTER_KERNEL
(
add_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
AddGradKernel
,
...
...
@@ -131,7 +131,7 @@ PT_REGISTER_KERNEL(add_grad,
phi
::
dtype
::
complex
<
float
>
,
phi
::
dtype
::
complex
<
double
>
)
{}
P
T
_REGISTER_KERNEL
(
add_double_grad
,
P
D
_REGISTER_KERNEL
(
add_double_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
AddDoubleGradKernel
,
...
...
@@ -143,7 +143,7 @@ PT_REGISTER_KERNEL(add_double_grad,
phi
::
dtype
::
complex
<
float
>
,
phi
::
dtype
::
complex
<
double
>
)
{}
P
T
_REGISTER_KERNEL
(
add_triple_grad
,
P
D
_REGISTER_KERNEL
(
add_triple_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
AddTripleGradKernel
,
...
...
@@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
phi
::
dtype
::
complex
<
float
>
,
phi
::
dtype
::
complex
<
double
>
)
{}
P
T
_REGISTER_KERNEL
(
subtract_grad
,
P
D
_REGISTER_KERNEL
(
subtract_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
SubtractGradKernel
,
...
...
@@ -167,7 +167,7 @@ PT_REGISTER_KERNEL(subtract_grad,
phi
::
dtype
::
complex
<
float
>
,
phi
::
dtype
::
complex
<
double
>
)
{}
P
T
_REGISTER_KERNEL
(
subtract_double_grad
,
P
D
_REGISTER_KERNEL
(
subtract_double_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
SubtractDoubleGradKernel
,
...
...
paddle/phi/kernels/gpu/expand_grad_kernel.cu
浏览文件 @
4a338796
...
...
@@ -18,7 +18,7 @@
#include "paddle/phi/kernels/expand_grad_kernel.h"
#include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
P
T
_REGISTER_KERNEL
(
expand_grad
,
P
D
_REGISTER_KERNEL
(
expand_grad
,
GPU
,
ALL_LAYOUT
,
phi
::
ExpandGradKernel
,
...
...
paddle/phi/kernels/gpu/expand_kernel.cu
浏览文件 @
4a338796
...
...
@@ -19,7 +19,7 @@
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/impl/expand_kernel_impl.h"
P
T
_REGISTER_KERNEL
(
expand
,
P
D
_REGISTER_KERNEL
(
expand
,
GPU
,
ALL_LAYOUT
,
phi
::
ExpandKernel
,
...
...
paddle/phi/kernels/gpu/full_kernel.cu
浏览文件 @
4a338796
...
...
@@ -98,7 +98,7 @@ void FullLikeKernel(const Context& dev_ctx,
}
// namespace phi
P
T
_REGISTER_KERNEL
(
full
,
P
D
_REGISTER_KERNEL
(
full
,
GPU
,
ALL_LAYOUT
,
phi
::
FullKernel
,
...
...
@@ -113,7 +113,7 @@ PT_REGISTER_KERNEL(full,
phi
::
dtype
::
complex
<
float
>
,
phi
::
dtype
::
complex
<
double
>
)
{}
P
T
_REGISTER_KERNEL
(
full_like
,
P
D
_REGISTER_KERNEL
(
full_like
,
GPU
,
ALL_LAYOUT
,
phi
::
FullLikeKernel
,
...
...
paddle/phi/kernels/gpu/histogram_kernel.cu
@@ -149,7 +149,7 @@ void HistogramKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(histogram,
+PD_REGISTER_KERNEL(histogram,
                    GPU,
                    ALL_LAYOUT,
                    phi::HistogramKernel,
 ...

paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu
@@ -17,6 +17,6 @@
 #include "paddle/phi/kernels/huber_loss_grad_kernel.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
 }

paddle/phi/kernels/gpu/huber_loss_kernel.cu
@@ -17,5 +17,5 @@
 #include "paddle/phi/kernels/huber_loss_kernel.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}

paddle/phi/kernels/gpu/lerp_grad_kernel.cu
@@ -17,5 +17,5 @@
 #include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
 #include "paddle/phi/kernels/lerp_grad_kernel.h"
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     lerp_grad, GPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}

paddle/phi/kernels/gpu/lerp_kernel.cu
@@ -17,4 +17,4 @@
 #include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
 #include "paddle/phi/kernels/lerp_kernel.h"
-PT_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
+PD_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -96,7 +96,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(masked_select_grad,
+PD_REGISTER_KERNEL(masked_select_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectGradKernel,
 ...

paddle/phi/kernels/gpu/masked_select_kernel.cu
@@ -108,7 +108,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(masked_select,
+PD_REGISTER_KERNEL(masked_select,
                    GPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectKernel,
 ...

paddle/phi/kernels/gpu/math_kernel.cu
@@ -95,7 +95,7 @@ using float16 = phi::dtype::float16;
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
-PT_REGISTER_KERNEL(add_raw,
+PD_REGISTER_KERNEL(add_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddRawKernel,
 ...
@@ -107,7 +107,7 @@ PT_REGISTER_KERNEL(add_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(subtract_raw,
+PD_REGISTER_KERNEL(subtract_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::SubtractRawKernel,
 ...
@@ -119,7 +119,7 @@ PT_REGISTER_KERNEL(subtract_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(divide_raw,
+PD_REGISTER_KERNEL(divide_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::DivideRawKernel,
 ...
@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL(divide_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(multiply_raw,
+PD_REGISTER_KERNEL(multiply_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::MultiplyRawKernel,
 ...
@@ -142,7 +142,7 @@ PT_REGISTER_KERNEL(multiply_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(sum_raw,
+PD_REGISTER_KERNEL(sum_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::SumRawKernel,
 ...
@@ -158,7 +158,7 @@ PT_REGISTER_KERNEL(sum_raw,
   kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
-PT_REGISTER_KERNEL(mean_raw,
+PD_REGISTER_KERNEL(mean_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::MeanRawKernel,
 ...
paddle/phi/kernels/gpu/matmul_grad_kernel.cu
@@ -19,7 +19,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(matmul_grad,
+PD_REGISTER_KERNEL(matmul_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulGradKernel,
 ...
@@ -30,7 +30,7 @@ PT_REGISTER_KERNEL(matmul_grad,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(matmul_double_grad,
+PD_REGISTER_KERNEL(matmul_double_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulDoubleGradKernel,
 ...
@@ -40,7 +40,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(matmul_triple_grad,
+PD_REGISTER_KERNEL(matmul_triple_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulTripleGradKernel,
 ...

paddle/phi/kernels/gpu/matmul_kernel.cu
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
-PT_REGISTER_KERNEL(matmul,
+PD_REGISTER_KERNEL(matmul,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulKernel,
 ...

paddle/phi/kernels/gpu/norm_grad_kernel.cu
@@ -111,7 +111,7 @@ void NormGradKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(norm_grad,
+PD_REGISTER_KERNEL(norm_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::NormGradKernel,
 ...

paddle/phi/kernels/gpu/norm_kernel.cu
@@ -124,7 +124,7 @@ void NormKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(norm,
+PD_REGISTER_KERNEL(norm,
                    GPU,
                    ALL_LAYOUT,
                    phi::NormKernel,
 ...

paddle/phi/kernels/gpu/scale_kernel.cu
@@ -63,7 +63,7 @@ void ScaleKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(scale,
+PD_REGISTER_KERNEL(scale,
                    GPU,
                    ALL_LAYOUT,
                    phi::ScaleKernel,
 ...

paddle/phi/kernels/gpu/sign_kernel.cu.cc
@@ -23,5 +23,5 @@ limitations under the License. */
 using float16 = phi::dtype::float16;
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     sign, GPU, ALL_LAYOUT, phi::SignKernel, float, double, float16) {}
paddle/phi/kernels/gpu/split_kernel.cu
@@ -59,7 +59,7 @@ void SplitKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(split,
+PD_REGISTER_KERNEL(split,
                    GPU,
                    ALL_LAYOUT,
                    phi::SplitKernel,
 ...

paddle/phi/kernels/gpu/trace_grad_kernel.cu
@@ -18,7 +18,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h"
-PT_REGISTER_KERNEL(trace_grad,
+PD_REGISTER_KERNEL(trace_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::TraceGradKernel,
 ...

paddle/phi/kernels/gpu/trace_kernel.cu
@@ -44,7 +44,7 @@ void TraceKernel(const Context& ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(trace,
+PD_REGISTER_KERNEL(trace,
                    GPU,
                    ALL_LAYOUT,
                    phi::TraceKernel,
 ...

paddle/phi/kernels/gpu/trunc_grad_kernel.cu
@@ -44,7 +44,7 @@ void TruncGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(trunc_grad,
+PD_REGISTER_KERNEL(trunc_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::TruncGradKernel,
 ...

paddle/phi/kernels/gpu/trunc_kernel.cu
@@ -77,5 +77,5 @@ void TruncKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     trunc, GPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
paddle/phi/kernels/math_kernel.cc
@@ -81,10 +81,10 @@ void MultiplyKernel(const Context& dev_ctx,
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     mean, CPU, ALL_LAYOUT, phi::MeanKernel, float, double, bool) {}
-PT_REGISTER_KERNEL(sum,
+PD_REGISTER_KERNEL(sum,
                    CPU,
                    ALL_LAYOUT,
                    phi::SumKernel,
 ...
@@ -100,7 +100,7 @@ PT_REGISTER_KERNEL(sum,
   kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
-PT_REGISTER_KERNEL(add,
+PD_REGISTER_KERNEL(add,
                    CPU,
                    ALL_LAYOUT,
                    phi::AddKernel,
 ...
@@ -111,7 +111,7 @@ PT_REGISTER_KERNEL(add,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(subtract,
+PD_REGISTER_KERNEL(subtract,
                    CPU,
                    ALL_LAYOUT,
                    phi::SubtractKernel,
 ...
@@ -122,7 +122,7 @@ PT_REGISTER_KERNEL(subtract,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(divide,
+PD_REGISTER_KERNEL(divide,
                    CPU,
                    ALL_LAYOUT,
                    phi::DivideKernel,
 ...
@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(divide,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(multiply,
+PD_REGISTER_KERNEL(multiply,
                    CPU,
                    ALL_LAYOUT,
                    phi::MultiplyKernel,
 ...
@@ -145,7 +145,7 @@ PT_REGISTER_KERNEL(multiply,
                    complex128) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(mean,
+PD_REGISTER_KERNEL(mean,
                    GPU,
                    ALL_LAYOUT,
                    phi::MeanKernel,
 ...
@@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(mean,
                    int,
                    int64_t,
                    phi::dtype::float16) {}
-PT_REGISTER_KERNEL(sum,
+PD_REGISTER_KERNEL(sum,
                    GPU,
                    ALL_LAYOUT,
                    phi::SumKernel,
 ...
@@ -170,7 +170,7 @@ PT_REGISTER_KERNEL(sum,
                    complex128) {
   kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
-PT_REGISTER_KERNEL(add,
+PD_REGISTER_KERNEL(add,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddKernel,
 ...
@@ -182,7 +182,7 @@ PT_REGISTER_KERNEL(add,
                    phi::dtype::float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(subtract,
+PD_REGISTER_KERNEL(subtract,
                    GPU,
                    ALL_LAYOUT,
                    phi::SubtractKernel,
 ...
@@ -194,7 +194,7 @@ PT_REGISTER_KERNEL(subtract,
                    phi::dtype::float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(divide,
+PD_REGISTER_KERNEL(divide,
                    GPU,
                    ALL_LAYOUT,
                    phi::DivideKernel,
 ...
@@ -205,7 +205,7 @@ PT_REGISTER_KERNEL(divide,
                    phi::dtype::float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(multiply,
+PD_REGISTER_KERNEL(multiply,
                    GPU,
                    ALL_LAYOUT,
                    phi::MultiplyKernel,
 ...
paddle/phi/kernels/reshape_grad_kernel.cc
@@ -37,24 +37,24 @@ void ReshapeDoubleGradKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(reshape_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_grad,
                            CPU,
                            ALL_LAYOUT,
                            phi::ReshapeGradKernel<phi::CPUContext>,
                            ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
                            CPU,
                            ALL_LAYOUT,
                            phi::ReshapeDoubleGradKernel<phi::CPUContext>,
                            ALL_DTYPE) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_GENERAL_KERNEL(reshape_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_grad,
                            GPU,
                            ALL_LAYOUT,
                            phi::ReshapeGradKernel<phi::GPUContext>,
                            ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
                            GPU,
                            ALL_LAYOUT,
                            phi::ReshapeDoubleGradKernel<phi::GPUContext>,
 ...
@@ -62,12 +62,12 @@ PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_GENERAL_KERNEL(reshape_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_grad,
                            XPU,
                            ALL_LAYOUT,
                            phi::ReshapeGradKernel<phi::XPUContext>,
                            ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
+PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
                            XPU,
                            ALL_LAYOUT,
                            phi::ReshapeDoubleGradKernel<phi::XPUContext>,
 ...

paddle/phi/kernels/reshape_kernel.cc
@@ -52,18 +52,18 @@ void ReshapeWithXShape(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     reshape, CPU, ALL_LAYOUT, phi::ReshapeKernel<phi::CPUContext>, ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
+PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
                            CPU,
                            ALL_LAYOUT,
                            phi::ReshapeWithXShape<phi::CPUContext>,
                            ALL_DTYPE) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     reshape, GPU, ALL_LAYOUT, phi::ReshapeKernel<phi::GPUContext>, ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
+PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
                            GPU,
                            ALL_LAYOUT,
                            phi::ReshapeWithXShape<phi::GPUContext>,
 ...
@@ -71,9 +71,9 @@ PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     reshape, XPU, ALL_LAYOUT, phi::ReshapeKernel<phi::XPUContext>, ALL_DTYPE) {}
-PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
+PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
                            XPU,
                            ALL_LAYOUT,
                            phi::ReshapeWithXShape<phi::XPUContext>,
 ...
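The reshape hunks above show the general-kernel form in full, so here is a hedged, self-contained sketch of that same pattern with the renamed macro. The registration line is taken from the diff; the include paths are assumptions, and the comment reflects my reading of ALL_DTYPE rather than official documentation.

// Hedged sketch: "general" registration, where the kernel is registered once
// per backend with ALL_DTYPE instead of once per element type.
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/backends/cpu/cpu_context.h"  // assumed path for phi::CPUContext
#include "paddle/phi/kernels/reshape_kernel.h"    // assumed header declaring phi::ReshapeKernel

PD_REGISTER_GENERAL_KERNEL(
    reshape, CPU, ALL_LAYOUT, phi::ReshapeKernel<phi::CPUContext>, ALL_DTYPE) {}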
paddle/phi/kernels/selected_rows/full_kernel.cc
@@ -36,7 +36,7 @@ void FullSR(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(full_sr,
+PD_REGISTER_KERNEL(full_sr,
                    CPU,
                    ALL_LAYOUT,
                    phi::FullSR,
 ...
@@ -53,7 +53,7 @@ PT_REGISTER_KERNEL(full_sr,
                    phi::dtype::complex<double>) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(full_sr,
+PD_REGISTER_KERNEL(full_sr,
                    GPU,
                    ALL_LAYOUT,
                    phi::FullSR,
 ...

paddle/phi/kernels/selected_rows/scale_kernel.cc
@@ -38,7 +38,7 @@ void ScaleSR(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(scale_sr,
+PD_REGISTER_KERNEL(scale_sr,
                    CPU,
                    ALL_LAYOUT,
                    phi::ScaleSR,
 ...
@@ -52,7 +52,7 @@ PT_REGISTER_KERNEL(scale_sr,
                    int64_t) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(scale_sr,
+PD_REGISTER_KERNEL(scale_sr,
                    GPU,
                    ALL_LAYOUT,
                    phi::ScaleSR,
 ...
paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc
@@ -284,7 +284,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PT_REGISTER_KERNEL(dense_to_sparse_coo,
+PD_REGISTER_KERNEL(dense_to_sparse_coo,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::DenseToSparseCooKernel,
 ...
@@ -297,7 +297,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_csr_to_coo,
+PD_REGISTER_KERNEL(sparse_csr_to_coo,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCsrToCooKernel,
 ...
@@ -310,7 +310,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_coo_to_csr,
+PD_REGISTER_KERNEL(sparse_coo_to_csr,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCooToCsrKernel,
 ...
@@ -323,7 +323,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(dense_to_sparse_csr,
+PD_REGISTER_KERNEL(dense_to_sparse_csr,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::DenseToSparseCsrKernel,
 ...
@@ -336,7 +336,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_coo_to_dense,
+PD_REGISTER_KERNEL(sparse_coo_to_dense,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCooToDenseKernel,
 ...
@@ -349,7 +349,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_csr_to_dense,
+PD_REGISTER_KERNEL(sparse_csr_to_dense,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCsrToDenseKernel,
 ...

paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
@@ -553,7 +553,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx,
 }  // namespace sparse
 }  // namespace phi
-PT_REGISTER_KERNEL(dense_to_sparse_coo,
+PD_REGISTER_KERNEL(dense_to_sparse_coo,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::DenseToSparseCooKernel,
 ...
@@ -566,7 +566,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_csr_to_coo,
+PD_REGISTER_KERNEL(sparse_csr_to_coo,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCsrToCooKernel,
 ...
@@ -579,7 +579,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_coo_to_csr,
+PD_REGISTER_KERNEL(sparse_coo_to_csr,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCooToCsrKernel,
 ...
@@ -592,7 +592,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(dense_to_sparse_csr,
+PD_REGISTER_KERNEL(dense_to_sparse_csr,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::DenseToSparseCsrKernel,
 ...
@@ -605,7 +605,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_coo_to_dense,
+PD_REGISTER_KERNEL(sparse_coo_to_dense,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCooToDenseKernel,
 ...
@@ -618,7 +618,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense,
                    int,
                    int64_t) {}
-PT_REGISTER_KERNEL(sparse_csr_to_dense,
+PD_REGISTER_KERNEL(sparse_csr_to_dense,
                    GPU,
                    ALL_LAYOUT,
                    phi::sparse::SparseCsrToDenseKernel,
 ...
paddle/phi/kernels/transfer_layout_kernel.cc
@@ -69,7 +69,7 @@ void TransferLayoutKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(pten_transfer_layout,
+PD_REGISTER_GENERAL_KERNEL(pten_transfer_layout,
                            CPU,
                            ALL_LAYOUT,
                            phi::TransferLayoutKernel<phi::CPUContext>,
 ...

paddle/phi/kernels/xpu/cast_kernel.cc
@@ -86,7 +86,7 @@ void CastKernel(const Context& dev_ctx,
 }
 }  // namespace phi
-PT_REGISTER_KERNEL(cast,
+PD_REGISTER_KERNEL(cast,
                    XPU,
                    ALL_LAYOUT,
                    phi::CastKernel,
 ...

paddle/phi/kernels/xpu/copy_kernel.cc
@@ -69,5 +69,5 @@ void Copy(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     copy, XPU, ALL_LAYOUT, phi::Copy<phi::XPUContext>, ALL_DTYPE) {}

paddle/phi/kernels/xpu/full_kernel.cc
@@ -116,7 +116,7 @@ void FullLikeKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(full,
+PD_REGISTER_KERNEL(full,
                    XPU,
                    ALL_LAYOUT,
                    phi::FullKernel,
 ...
@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(full,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
-PT_REGISTER_KERNEL(full_like,
+PD_REGISTER_KERNEL(full_like,
                    XPU,
                    ALL_LAYOUT,
                    phi::FullLikeKernel,
 ...

paddle/phi/kernels/xpu/scale_kernel.cc
@@ -56,7 +56,7 @@ void ScaleKernel(const Context& dev_ctx,
 }  // namespace phi
-PT_REGISTER_KERNEL(scale,
+PD_REGISTER_KERNEL(scale,
                    XPU,
                    ALL_LAYOUT,
                    phi::ScaleKernel,
 ...
paddle/phi/ops/compat/abs_sig.cc
@@ -32,7 +32,7 @@ KernelSignature AbsDoubleGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(abs, phi::AbsOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(abs_grad, phi::AbsGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(abs_double_grad,
+PD_REGISTER_ARG_MAPPING_FN(abs, phi::AbsOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(abs_grad, phi::AbsGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(abs_double_grad,
                            phi::AbsDoubleGradOpArgumentMapping);

paddle/phi/ops/compat/cast_sig.cc
@@ -22,4 +22,4 @@ KernelSignature CastOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(cast, phi::CastOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(cast, phi::CastOpArgumentMapping);

paddle/phi/ops/compat/concat_sig.cc
@@ -25,4 +25,4 @@ KernelSignature ConcatOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(concat, phi::ConcatOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(concat, phi::ConcatOpArgumentMapping);

paddle/phi/ops/compat/diagonal_sig.cc
@@ -25,4 +25,4 @@ KernelSignature DiagonalGradOpArgumentMapping(
 }
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(diagonal_grad, phi::DiagonalGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(diagonal_grad, phi::DiagonalGradOpArgumentMapping);

paddle/phi/ops/compat/digamma_sig.cc
@@ -24,4 +24,4 @@ KernelSignature DigammaGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(digamma_grad, phi::DigammaGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(digamma_grad, phi::DigammaGradOpArgumentMapping);

paddle/phi/ops/compat/dot_sig.cc
@@ -25,4 +25,4 @@ KernelSignature DotGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(dot_grad, phi::DotGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(dot_grad, phi::DotGradOpArgumentMapping);
paddle/phi/ops/compat/elementwise_sig.cc
@@ -102,28 +102,28 @@ KernelSignature ElementwiseSubGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_sub, subtract);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad);
-PT_REGISTER_BASE_KERNEL_NAME(elementwise_sub_grad, subtract_grad);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_add,
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_sub, subtract);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_sub_grad, subtract_grad);
+PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                            phi::ElementwiseAddOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_sub,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_sub,
                            phi::ElementwiseSubOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_mul,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_mul,
                            phi::ElementwiseMulOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_div,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_div,
                            phi::ElementwiseDivOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_add_grad,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad,
                            phi::ElementwiseAddGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_add_grad_grad,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad_grad,
                            phi::ElementwiseAddDoubleGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_add_triple_grad,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_add_triple_grad,
                            phi::ElementwiseAddTripleGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(elementwise_sub_grad,
+PD_REGISTER_ARG_MAPPING_FN(elementwise_sub_grad,
                            phi::ElementwiseSubGradOpArgumentMapping);
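The compat files in this part of the diff all follow one pattern: map a legacy fluid operator name onto its phi kernel name, and attach an argument-mapping function that builds the kernel signature. Below is a hedged, minimal sketch of that pair with the renamed macros; the names are taken from the elementwise hunks above, while the include path is an assumption.

// Hedged sketch of the op-compat registration pattern after the rename.
#include "paddle/phi/core/compat/op_utils.h"  // assumed location of these macros

// Legacy fluid op name -> phi kernel base name.
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
// Legacy op name -> function that produces the KernelSignature for the phi
// kernel (phi::ElementwiseAddOpArgumentMapping is declared in
// elementwise_sig.cc above).
PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                           phi::ElementwiseAddOpArgumentMapping);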
paddle/phi/ops/compat/empty_sig.cc
@@ -28,4 +28,4 @@ KernelSignature EmptyOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(empty, phi::EmptyOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(empty, phi::EmptyOpArgumentMapping);

paddle/phi/ops/compat/expand_sig.cc
@@ -47,8 +47,8 @@ KernelSignature ExpandGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(expand_v2, expand);
-PT_REGISTER_BASE_KERNEL_NAME(expand_v2_grad, expand_grad);
+PD_REGISTER_BASE_KERNEL_NAME(expand_v2, expand);
+PD_REGISTER_BASE_KERNEL_NAME(expand_v2_grad, expand_grad);
-PT_REGISTER_ARG_MAPPING_FN(expand_v2, phi::ExpandOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(expand_v2_grad, phi::ExpandGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(expand_v2, phi::ExpandOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(expand_v2_grad, phi::ExpandGradOpArgumentMapping);

paddle/phi/ops/compat/fill_any_like_sig.cc
@@ -23,6 +23,6 @@ KernelSignature FillAnyLikeOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(fill_any_like, full_like);
+PD_REGISTER_BASE_KERNEL_NAME(fill_any_like, full_like);
-PT_REGISTER_ARG_MAPPING_FN(fill_any_like, phi::FillAnyLikeOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(fill_any_like, phi::FillAnyLikeOpArgumentMapping);

paddle/phi/ops/compat/fill_constant_sig.cc
@@ -123,6 +123,6 @@ KernelSignature FillConstantOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(fill_constant, full);
+PD_REGISTER_BASE_KERNEL_NAME(fill_constant, full);
-PT_REGISTER_ARG_MAPPING_FN(fill_constant, phi::FillConstantOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(fill_constant, phi::FillConstantOpArgumentMapping);

paddle/phi/ops/compat/flatten_sig.cc
@@ -36,10 +36,10 @@ KernelSignature FlattenGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range, flatten);
-PT_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range_grad, flatten_grad);
+PD_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range, flatten);
+PD_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range_grad, flatten_grad);
-PT_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range,
+PD_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range,
                            phi::FlattenOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range_grad,
+PD_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range_grad,
                            phi::FlattenGradOpArgumentMapping);
paddle/phi/ops/compat/histogram_sig.cc
@@ -22,4 +22,4 @@ KernelSignature HistogramOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(histogram, phi::HistogramOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(histogram, phi::HistogramOpArgumentMapping);

paddle/phi/ops/compat/huber_loss_sig.cc
@@ -31,6 +31,6 @@ KernelSignature HuberLossGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(huber_loss_grad,
+PD_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(huber_loss_grad,
                            phi::HuberLossGradOpArgumentMapping);

paddle/phi/ops/compat/lerp_sig.cc
@@ -29,5 +29,5 @@ KernelSignature LerpGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(lerp, phi::LerpOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(lerp_grad, phi::LerpGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(lerp, phi::LerpOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(lerp_grad, phi::LerpGradOpArgumentMapping);

paddle/phi/ops/compat/masked_select_sig.cc
@@ -31,6 +31,6 @@ KernelSignature MaskedSelectGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(masked_select, phi::MaskedSelectOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(masked_select_grad,
+PD_REGISTER_ARG_MAPPING_FN(masked_select, phi::MaskedSelectOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(masked_select_grad,
                            phi::MaskedSelectGradOpArgumentMapping);

paddle/phi/ops/compat/matmul_sig.cc
@@ -49,13 +49,13 @@ KernelSignature MatmulTripleGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(matmul_v2, matmul);
-PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad, matmul_grad);
-PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad_grad, matmul_double_grad);
-PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_triple_grad, matmul_triple_grad);
+PD_REGISTER_BASE_KERNEL_NAME(matmul_v2, matmul);
+PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad, matmul_grad);
+PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad_grad, matmul_double_grad);
+PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_triple_grad, matmul_triple_grad);
-PT_REGISTER_ARG_MAPPING_FN(matmul_v2_grad, phi::MatmulGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(matmul_v2_grad_grad,
+PD_REGISTER_ARG_MAPPING_FN(matmul_v2_grad, phi::MatmulGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(matmul_v2_grad_grad,
                            phi::MatmulDoubleGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(matmul_v2_triple_grad,
+PD_REGISTER_ARG_MAPPING_FN(matmul_v2_triple_grad,
                            phi::MatmulTripleGradOpArgumentMapping);
paddle/phi/ops/compat/norm_sig.cc
@@ -30,5 +30,5 @@ KernelSignature NormGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(norm, phi::NormOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(norm_grad, phi::NormGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(norm, phi::NormOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(norm_grad, phi::NormGradOpArgumentMapping);

paddle/phi/ops/compat/reduce_sig.cc
@@ -45,8 +45,8 @@ KernelSignature ReduceMeanOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum);
-PT_REGISTER_BASE_KERNEL_NAME(reduce_mean, mean);
+PD_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum);
+PD_REGISTER_BASE_KERNEL_NAME(reduce_mean, mean);
-PT_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);

paddle/phi/ops/compat/reshape_sig.cc
@@ -45,11 +45,11 @@ KernelSignature ReshapeDoubleGradOpArgumentMapping(
 }  // namespace phi
-PT_REGISTER_BASE_KERNEL_NAME(reshape2, reshape);
-PT_REGISTER_BASE_KERNEL_NAME(reshape2_grad, reshape_grad);
-PT_REGISTER_BASE_KERNEL_NAME(reshape2_grad_grad, reshape_double_grad);
+PD_REGISTER_BASE_KERNEL_NAME(reshape2, reshape);
+PD_REGISTER_BASE_KERNEL_NAME(reshape2_grad, reshape_grad);
+PD_REGISTER_BASE_KERNEL_NAME(reshape2_grad_grad, reshape_double_grad);
-PT_REGISTER_ARG_MAPPING_FN(reshape2, phi::ReshapeOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(reshape2_grad, phi::ReshapeGradOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(reshape2_grad_grad,
+PD_REGISTER_ARG_MAPPING_FN(reshape2, phi::ReshapeOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(reshape2_grad, phi::ReshapeGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(reshape2_grad_grad,
                            phi::ReshapeDoubleGradOpArgumentMapping);
paddle/phi/ops/compat/scale_sig.cc
@@ -72,4 +72,4 @@ KernelSignature ScaleOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
 // op_type, api_name, arg_mapping_fn
-PT_REGISTER_ARG_MAPPING_FN(scale, phi::ScaleOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(scale, phi::ScaleOpArgumentMapping);

paddle/phi/ops/compat/split_sig.cc
@@ -46,4 +46,4 @@ KernelSignature SplitOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(split, phi::SplitOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(split, phi::SplitOpArgumentMapping);

paddle/phi/ops/compat/trace_sig.cc
@@ -30,5 +30,5 @@ KernelSignature TraceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(trace, phi::TraceOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(trace_grad, phi::TraceGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(trace, phi::TraceOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(trace_grad, phi::TraceGradOpArgumentMapping);

paddle/phi/ops/compat/trunc_sig.cc
@@ -27,5 +27,5 @@ KernelSignature TruncGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
 }  // namespace phi
-PT_REGISTER_ARG_MAPPING_FN(trunc, phi::TruncOpArgumentMapping);
-PT_REGISTER_ARG_MAPPING_FN(trunc_grad, phi::TruncGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(trunc, phi::TruncOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(trunc_grad, phi::TruncGradOpArgumentMapping);
paddle/phi/tests/core/test_custom_kernel.cc
@@ -17,6 +17,8 @@ limitations under the License. */
 #define _LINUX
 #endif
+#include <gtest/gtest.h>
 #ifdef _LINUX
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/platform/device_context.h"
 ...
@@ -30,8 +32,6 @@ limitations under the License. */
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/infermeta/binary.h"
-#include <gtest/gtest.h>
 // user kernel function
 namespace custom_kernel {
 ...
@@ -98,16 +98,16 @@ void FakeDot(const Context& dev_ctx,
 }
 }  // namespace custom_kernel
-PD_REGISTER_KERNEL(fake_dot,
-                   CPU,
-                   ALL_LAYOUT,
-                   custom_kernel::FakeDot,
-                   float,
-                   double,
-                   int,
-                   int64_t,
-                   int8_t,
-                   uint8_t) {}
+PD_REGISTER_BUILTIN_KERNEL(fake_dot,
+                           CPU,
+                           ALL_LAYOUT,
+                           custom_kernel::FakeDot,
+                           float,
+                           double,
+                           int,
+                           int64_t,
+                           int8_t,
+                           uint8_t) {}
 namespace phi {
 namespace tests {
 ...
paddle/phi/tests/core/test_kernel_factory.cc
@@ -22,7 +22,7 @@ limitations under the License. */
 #include "gtest/gtest.h"
-PT_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
 namespace phi {
 namespace tests {
 ...
@@ -76,7 +76,7 @@ TEST(KernelRegistry, SetFP32Input) {
 }  // namespace tests
 }  // namespace phi
-PT_REGISTER_KERNEL(test,
+PD_REGISTER_KERNEL(test,
                    CPU,
                    ALL_LAYOUT,
                    phi::tests::TestKernel,
 ...

paddle/phi/tests/kernels/test_flatten_dev_api.cc
@@ -23,14 +23,14 @@ limitations under the License. */
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_registry.h"
-PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
 #endif
 #ifdef PADDLE_WITH_XPU
-PT_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
 #endif
 namespace phi {
 ...
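These test files also illustrate the declare/register split: PD_REGISTER_KERNEL (or its GENERAL/BUILTIN variants, as in the hunks above) lives in the translation unit that defines a kernel, while PD_DECLARE_KERNEL is placed in translation units that only depend on the kernel being registered, which, as I read it, keeps the registration from being dropped at link time. A hedged sketch of the usage side:

// Hedged sketch: referencing kernels that are registered in other translation
// units. The kernel name and backends mirror the test_flatten_dev_api.cc hunk
// above; the kernel itself is registered elsewhere with PD_REGISTER_*.
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);  // only in builds that register the GPU kernel
#endif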
python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc
@@ -45,7 +45,7 @@ void DotKernel(const Context& dev_ctx,
 }  // namespace custom_kernel
 }  // namespace paddle
-PD_REGISTER_KERNEL(
+PD_REGISTER_BUILTIN_KERNEL(
     dot, CPU, ALL_LAYOUT, paddle::custom_kernel::DotKernel, int8_t) {
   kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::INT8);
 }

python/paddle/utils/code_gen/api_gen.py
@@ -118,7 +118,7 @@ def source_include(header_file_path):
 def api_register():
     return """
-PT_REGISTER_API(Math);
+PD_REGISTER_API(Math);
 """
 ...
python/paddle/utils/code_gen/wrapped_infermeta_gen.py
@@ -26,7 +26,7 @@ def get_wrapped_infermeta_name(api_name):
 def gene_wrapped_infermeta_and_register(api):
     if api.is_base_api and not api.is_dygraph_api:
         register_code = f"""
-PT_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']});"""
+PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']});"""
         if api.infer_meta['param'] is not None:
             kernel_params = api.kernel['param']
 ...
@@ -73,7 +73,7 @@ void {wrapped_infermeta_name}({", ".join(args)}) {{
 """
         register_code = f"""
-PT_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{get_wrapped_infermeta_name(api.kernel['func'][0])});"""
+PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{get_wrapped_infermeta_name(api.kernel['func'][0])});"""
         return declare_code, defind_code, register_code
     else:
 ...
tools/infrt/get_pten_kernel_function.sh
@@ -24,9 +24,9 @@ set -e
 kernel_register_info_file=`mktemp`
 PADDLE_ROOT="$(cd "$(dirname "$0")/../../" && pwd)"
 unset GREP_OPTIONS && find ${PADDLE_ROOT}/paddle/phi/kernels -name "*.c*" \
-    | xargs sed -e '/PT_REGISTER_\(GENERAL_\)\?KERNEL(/,/)/!d' \
+    | xargs sed -e '/PD_REGISTER_\(GENERAL_\)\?KERNEL(/,/)/!d' \
     | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' \
-    | grep PT_REGISTER \
+    | grep PD_REGISTER \
     | awk -F ",|\(|\)" '{gsub(/ /,"");$1="";print}' \
     | sort -u | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' \
     | grep -v "_grad" > $kernel_register_info_file
 ...
@@ -38,7 +38,7 @@ python3 ${PADDLE_ROOT}/python/paddle/utils/code_gen/wrapped_infermeta_gen.py \
     --wrapped_infermeta_header_path ${temp_path}/generate.h \
     --wrapped_infermeta_source_path ${temp_path}/generate.cc
-grep PT_REGISTER_INFER_META_FN ${temp_path}/generate.cc \
+grep PD_REGISTER_INFER_META_FN ${temp_path}/generate.cc \
     | awk -F "\(|,|::|\)" '{print $2, $4}' > ${temp_path}/wrap_info.txt
 #step 3: merge all infos
 ...