未验证 提交 4a338796 编写于 作者: C Chen Weihang 提交者: GitHub

[PTen->Phi PR2] Rename PT_REGISTER macro to PD_REGISTER (#39790)

* unify register macro

* rename declare macro

* fix infrt error
上级 73bf9673
......@@ -58,26 +58,26 @@ endfunction()
function(kernel_declare TARGET_LIST)
foreach(kernel_path ${TARGET_LIST})
file(READ ${kernel_path} kernel_impl)
# TODO(chenweihang): unify the parsing of PD_REGISTER_KERNEL and PD_REGISTER_GENERAL_KERNEL
# NOTE(chenweihang): now we don't recommend to use digit in kernel name
string(REGEX MATCH "(PT_REGISTER_KERNEL|PT_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
string(REGEX MATCH "(PD_REGISTER_KERNEL|PD_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
if (NOT first_registry STREQUAL "")
# parse the first kernel name
string(REPLACE "PT_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
string(REPLACE "PT_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
string(REPLACE "PD_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
string(REPLACE "PD_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
string(REPLACE "," "" kernel_name "${kernel_name}")
string(REGEX REPLACE "[ \t\r\n]+" "" kernel_name "${kernel_name}")
# append kernel declare into declarations.h
# TODO(chenweihang): default declare ALL_LAYOUT for each kernel
if (${kernel_path} MATCHES "./cpu\/")
file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
elseif (${kernel_path} MATCHES "./gpu\/")
file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
elseif (${kernel_path} MATCHES "./xpu\/")
file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
else ()
# deal with device-independent kernels; use CPU temporarily for now
file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
endif()
endif()
endforeach()
......@@ -285,9 +285,9 @@ endfunction()
# Parse the first PD_REGISTER_BASE_KERNEL_NAME / PD_REGISTER_ARG_MAPPING_FN
# registration found in TARGET and append the matching PD_DECLARE_* statement
# to the generated op-utils header, so the registrar symbol is referenced and
# not dropped at link time.
#
# Arguments:
#   TARGET - path of the source file, relative to CMAKE_CURRENT_SOURCE_DIR.
# Uses:
#   op_utils_header - caller-scope variable holding the output header path.
function(append_op_util_declare TARGET)
  file(READ ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET} target_content)
  # Match the macro name plus the (possibly whitespace-prefixed) op name.
  string(REGEX MATCH "(PD_REGISTER_BASE_KERNEL_NAME|PD_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
  # Turn the REGISTER invocation into the corresponding DECLARE invocation.
  string(REPLACE "PD_REGISTER_ARG_MAPPING_FN" "PD_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
  string(REPLACE "PD_REGISTER_BASE_KERNEL_NAME" "PD_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
  string(APPEND util_declare ");\n")
  file(APPEND ${op_utils_header} "${util_declare}")
endfunction()
......
......@@ -118,7 +118,7 @@ REGISTER_OPERATOR(infer_shape_utils_test,
paddle::framework::InferShapeUtilsTestOpMaker,
InferShapeUtilsTestInferShapeFunctor);
PT_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
PD_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
paddle::framework::InferShapeUtilsTestKernel, int) {}
TEST(InferShapeUtilsTest, ALL) {
......
......@@ -630,16 +630,16 @@ class PADDLE_API OpKernelInfoBuilder {
};
/////////////////////// Custom kernel register API /////////////////////////
// For inference: compile directly with framework
// Call after PD_REGISTER_BUILTIN_KERNEL(...)
void RegisterAllCustomKernel();
//////////////// Custom kernel register macro /////////////////////
// Refer to paddle/phi/core/kernel_registry.h, we can not use
// PD_REGISTER_KERNEL directly, common macros and functions are
// not ready for custom kernel now.
// Difference: custom_kernel stores all kernels' info into global
// g_custom_kernel_info_map before loading and registering into
// pten kernel management. Only providing PD_REGISTER_BUILTIN_KERNEL which
// supports 2 template arguments.
#define PD_BACKEND(arg__) phi::Backend::arg__
......@@ -666,11 +666,12 @@ void RegisterAllCustomKernel();
#define PD_ID __LINE__
#endif
#define PD_REGISTER_KERNEL(kernel_name, backend, layout, func, cpp_dtype, ...) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
_reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
"PD_REGISTER_KERNEL must be called in global namespace."); \
_PD_REGISTER_2TA_KERNEL( \
#define PD_REGISTER_BUILTIN_KERNEL( \
kernel_name, backend, layout, func, cpp_dtype, ...) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
_reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
"PD_REGISTER_BUILTIN_KERNEL must be called in global namespace."); \
_PD_REGISTER_2TA_KERNEL( \
kernel_name, backend, layout, func, cpp_dtype, ##__VA_ARGS__)
// WIN32 is not supported
......
......@@ -17,6 +17,6 @@ limitations under the License. */
// api symbols declare, remove in the future
#include "paddle/phi/api/lib/api_registry.h"
// Reference the API registration symbols so the translation units that
// define them are kept by the linker.
PD_DECLARE_API(Math);
PD_DECLARE_API(Utils);
PD_DECLARE_API(SparseApi);
......@@ -36,10 +36,10 @@ namespace experimental {
*/
// Symbol-anchoring macros for API translation units:
// PD_REGISTER_API(name) defines an exported symbol in the implementation
// file; PD_DECLARE_API(name) references it from a consumer so the linker
// keeps the registering translation unit.
#define PD_REGISTER_API(name) \
  PADDLE_API int RegisterSymbolsFor##name() { return 0; }

#define PD_DECLARE_API(name)                        \
  extern PADDLE_API int RegisterSymbolsFor##name(); \
  UNUSED static int use_pten_api_##name = RegisterSymbolsFor##name()
......
......@@ -27,15 +27,15 @@ limitations under the License. */
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/unary.h"
// Pull in the copy/split kernel registration symbols for each compiled
// backend so they are not discarded by the linker.
PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
#endif

#ifdef PADDLE_WITH_XPU
PD_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
#endif
namespace paddle {
......@@ -147,4 +147,4 @@ PADDLE_API std::vector<Tensor> split(const Tensor& x,
} // namespace experimental
} // namespace paddle
// Define the registration symbol for the Utils API (see PD_DECLARE_API).
PD_REGISTER_API(Utils);
......@@ -86,7 +86,7 @@ OpKernelInfoBuilder& OpKernelInfoBuilder::ArgsDef(CustomKernelArgsDefFn func) {
/////////////////////// Op register API /////////////////////////
// For inference: compile directly with framework
// Call after PD_REGISTER_BUILTIN_KERNEL(...)
void RegisterAllCustomKernel() {
auto& op_kernel_info_map = OpKernelInfoMap::Instance();
framework::RegisterKernelWithMetaInfoMap(op_kernel_info_map);
......
......@@ -22,20 +22,20 @@ limitations under the License. */
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/infermeta/unary.h"
// Pull in the sparse-conversion kernel registration symbols for each
// compiled backend so they are not discarded by the linker.
PD_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
#endif
namespace paddle {
......@@ -228,4 +228,4 @@ PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) {
} // namespace experimental
} // namespace paddle
// Define the registration symbol for the SparseApi (see PD_DECLARE_API).
PD_REGISTER_API(SparseApi);
......@@ -71,17 +71,17 @@ enum class Backend : uint8_t {
* Of course, we have also considered solving this problem through different
* named macros, for example, if we define
*
 * PD_REGISTER_KERNEL_FOR_ALL_BACKEND
*
* Based on this design pattern, the dtype and layout also have the same
* requirements, this cause we need to define a series of macros
*
 * PD_REGISTER_KERNEL_FOR_ALL_DTYPE
 * PD_REGISTER_KERNEL_FOR_ALL_LAYOUT
 * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT
 * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_DTYPE
 * PD_REGISTER_KERNEL_FOR_ALL_LAYOUT_AND_DTYPE
 * PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT_AND_DTYPE
*
* It makes the system of registering macros more complicated, we think
* this is not a simple design, so we still adopt the design of providing
......
......@@ -164,34 +164,34 @@ struct ArgumentMappingFnRegistrar {
}
};
// Map an op name to its phi base kernel name. Must be invoked at global
// namespace scope; the Touch* function gives PD_DECLARE_BASE_KERNEL_NAME a
// symbol to reference so this translation unit is kept at link time.
#define PD_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name)                \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                           \
      PD_REGISTER_base_kernel_name_ns_check_##op_type,                         \
      "PD_REGISTER_BASE_KERNEL_NAME must be called in global namespace.");     \
  static const ::phi::BaseKernelNameRegistrar                                  \
      __registrar_base_kernel_name_for_##op_type(#op_type, #base_kernel_name); \
  int TouchBaseKernelNameSymbol_##op_type() { return 0; }

// Reference the Touch* symbol defined by PD_REGISTER_BASE_KERNEL_NAME so the
// registrar object above is not discarded by the linker.
#define PD_DECLARE_BASE_KERNEL_NAME(op_type)                              \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                      \
      PD_DECLARE_ai_name_ns_check_##op_type,                              \
      "PD_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
  extern int TouchBaseKernelNameSymbol_##op_type();                       \
  UNUSED static int __declare_base_kernel_name_symbol_for_##op_type =     \
      TouchBaseKernelNameSymbol_##op_type()
// Register the argument mapping function for an op. Must be invoked at
// global namespace scope; the Touch* function gives PD_DECLARE_ARG_MAPPING_FN
// a symbol to reference so this translation unit is kept at link time.
#define PD_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn)              \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                     \
      PD_REGISTER_arg_map_fn_ns_check_##op_type,                         \
      "PD_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
  static const ::phi::ArgumentMappingFnRegistrar                         \
      __registrar_arg_map_fn_for_##op_type(#op_type, arg_mapping_fn);    \
  int TouchArgumentMappingFnSymbol_##op_type() { return 0; }

// Reference the Touch* symbol defined by PD_REGISTER_ARG_MAPPING_FN so the
// registrar object above is not discarded by the linker.
#define PD_DECLARE_ARG_MAPPING_FN(op_type)                              \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                    \
      PD_DECLARE_arg_map_fn_ns_check_##op_type,                         \
      "PD_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
  extern int TouchArgumentMappingFnSymbol_##op_type();                  \
  UNUSED static int __declare_arg_map_fn_symbol_for_##op_type =         \
      TouchArgumentMappingFnSymbol_##op_type()
......
......@@ -282,10 +282,10 @@ struct InferMetaFnRegistrar {
}
};
// Register an InferMeta function under the given kernel name prefix.
// Must be invoked at global namespace scope.
#define PD_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                          \
      PD_REGISTER_infer_meta_fn_ns_check_##kernel_name_prefix,                \
      "PD_REGISTER_INFER_META_FN must be called in global namespace.");       \
  static const ::phi::InferMetaFnRegistrar                                    \
      __registrar_arg_map_fn_for_##kernel_name_prefix(                        \
          #kernel_name_prefix, PT_INFER_META(variadic_infer_meta_fn))
......
......@@ -234,7 +234,7 @@ struct KernelRegistrar {
#define _PT_ARG_N(args) _PT_ARG_N_EXPAND args
#define _PT_RESQ_N() 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/** PD_REGISTER_KERNEL
*
* The most frequently used kernel registration macro, used for kernel
* registration with only data type as template parameter, and the function
......@@ -243,8 +243,8 @@ struct KernelRegistrar {
*
* Note: `2TA` means `2 template argument`
*/
#define PT_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
_PT_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
#define PD_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
_PD_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
kernel_name, \
backend, \
::phi::backend##Context, \
......@@ -252,12 +252,12 @@ struct KernelRegistrar {
meta_kernel_fn, \
__VA_ARGS__)
#define _PT_REGISTER_KERNEL( \
#define _PD_REGISTER_KERNEL( \
reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
pt_register_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
"PT_REGISTER_KERNEL must be called in global namespace."); \
PT_EXPAND(_PT_REGISTER_2TA_KERNEL(reg_type, \
PD_REGISTER_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
"PD_REGISTER_KERNEL must be called in global namespace."); \
PT_EXPAND(_PD_REGISTER_2TA_KERNEL(reg_type, \
kernel_name, \
backend, \
context, \
......@@ -266,7 +266,7 @@ struct KernelRegistrar {
__VA_ARGS__))
#ifndef _WIN32
#define _PT_REGISTER_2TA_KERNEL( \
#define _PD_REGISTER_2TA_KERNEL( \
reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
PT_KERNEL_INSTANTIATION(meta_kernel_fn, backend, context, __VA_ARGS__); \
static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
......@@ -295,7 +295,7 @@ struct KernelRegistrar {
*
* And msvc can work without template instantiation
*/
#define _PT_REGISTER_2TA_KERNEL( \
#define _PD_REGISTER_2TA_KERNEL( \
reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
......@@ -909,27 +909,27 @@ struct KernelRegistrar {
args_def_fn, \
meta_kernel_fn, \
__VA_ARGS__))
/** PD_REGISTER_GENERAL_KERNEL
 *
 * Basic kernel registration macro, used to register an instantiated kernel
 * function with one template argument.
 */
#define PD_REGISTER_GENERAL_KERNEL(                 \
    kernel_name, backend, layout, kernel_fn, dtype) \
  _PD_REGISTER_GENERAL_KERNEL(                      \
      ::phi::RegType::BUILTIN, kernel_name, backend, layout, kernel_fn, dtype)

// Internal helper: checks global-namespace usage, then forwards to the
// platform-specific __PD_REGISTER_GENERAL_KERNEL implementation.
#define _PD_REGISTER_GENERAL_KERNEL(                                         \
    reg_type, kernel_name, backend, layout, kernel_fn, dtype)                \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                         \
      PD_REGISTER_no_t_kernel_ns_check_##kernel_name##_##backend##_##layout, \
      "PD_REGISTER_NO_TEMPLATE_KERNEL must be called in global namespace."); \
  __PD_REGISTER_GENERAL_KERNEL(                                              \
      reg_type, kernel_name, backend, layout, kernel_fn, dtype)
#ifndef _WIN32
#define __PT_REGISTER_GENERAL_KERNEL( \
#define __PD_REGISTER_GENERAL_KERNEL( \
reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
template decltype(kernel_fn) kernel_fn; \
static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
......@@ -950,7 +950,7 @@ struct KernelRegistrar {
void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
#else
#define __PT_REGISTER_GENERAL_KERNEL( \
#define __PD_REGISTER_GENERAL_KERNEL( \
reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
......@@ -971,42 +971,43 @@ struct KernelRegistrar {
const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
#endif
/** PD_DECLARE_KERNEL
 *
 * Used to export the symbols of the file where the kernel is located,
 * to avoid them being removed by the linker.
 */
#define PD_DECLARE_KERNEL(kernel_name, backend, layout)                   \
  PT_STATIC_ASSERT_GLOBAL_NAMESPACE(                                      \
      PD_DECLARE_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
      "PD_DECLARE_KERNEL must be called in global namespace.");           \
  extern int TouchKernelSymbolFor_##kernel_name##_##backend##_##layout(); \
  UNUSED static int                                                       \
      __declare_kernel_symbol_for_##kernel_name##_##backend##_##layout =  \
          TouchKernelSymbolFor_##kernel_name##_##backend##_##layout()
/** PD_REGISTER_BUILTIN_KERNEL
 *
 * Used to register kernels for built-in backends.
 * Supports the CPU, GPU and XPU backends.
 */
// NOTE(review): ::phi::RegType::PLUGIN looks inconsistent with the BUILTIN
// name of this macro — confirm whether RegType::BUILTIN was intended.
#define PD_REGISTER_BUILTIN_KERNEL(                    \
    kernel_name, backend, layout, meta_kernel_fn, ...) \
  _PD_REGISTER_KERNEL(::phi::RegType::PLUGIN,          \
                      kernel_name,                     \
                      backend,                         \
                      ::phi::backend##Context,         \
                      layout,                          \
                      meta_kernel_fn,                  \
                      __VA_ARGS__)
/** PD_REGISTER_PLUGIN_KERNEL
 *
 * Used to register kernels for plug-in backends.
 * Supports user-defined backends such as 'Ascend910'.
 */
#define PD_REGISTER_CUSTOM_KERNEL( \
#define PD_REGISTER_PLUGIN_KERNEL( \
kernel_name, backend, layout, meta_kernel_fn, ...) \
_PT_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
_PD_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
kernel_name, \
backend, \
::phi::CustomContext, \
......
......@@ -539,5 +539,5 @@ void TraceInferMeta(
} // namespace phi
// Register the InferMeta functions for copy_to and split.
PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
PD_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);
......@@ -19,7 +19,7 @@
using phi::dtype::complex;
PT_REGISTER_KERNEL(abs_grad,
PD_REGISTER_KERNEL(abs_grad,
CPU,
ALL_LAYOUT,
phi::AbsGradKernel,
......@@ -29,7 +29,7 @@ PT_REGISTER_KERNEL(abs_grad,
int64_t,
complex<float>,
complex<double>) {}
PT_REGISTER_KERNEL(abs_double_grad,
PD_REGISTER_KERNEL(abs_double_grad,
CPU,
ALL_LAYOUT,
phi::AbsDoubleGradKernel,
......
......@@ -36,7 +36,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
} // namespace phi
PT_REGISTER_KERNEL(abs,
PD_REGISTER_KERNEL(abs,
CPU,
ALL_LAYOUT,
phi::AbsKernel,
......
......@@ -51,5 +51,5 @@ void BernoulliKernel(const Context& ctx,
} // namespace phi
// Register the CPU bernoulli kernel for float and double.
PD_REGISTER_KERNEL(
    bernoulli, CPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}
......@@ -58,7 +58,7 @@ void CastKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(cast,
PD_REGISTER_KERNEL(cast,
CPU,
ALL_LAYOUT,
phi::CastKernel,
......
......@@ -21,7 +21,7 @@
// See Note [ Why still include the fluid headers? ]
#include "paddle/phi/common/complex.h"
PT_REGISTER_KERNEL(conj,
PD_REGISTER_KERNEL(conj,
CPU,
ALL_LAYOUT,
phi::ConjKernel,
......
......@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(concat,
PD_REGISTER_KERNEL(concat,
CPU,
ALL_LAYOUT,
phi::ConcatKernel,
......
......@@ -56,5 +56,5 @@ void Copy(const Context& dev_ctx,
} // namespace phi
// Register the CPU copy kernel for all dtypes (no template dtype argument).
PD_REGISTER_GENERAL_KERNEL(
    copy, CPU, ALL_LAYOUT, phi::Copy<phi::CPUContext>, ALL_DTYPE) {}
......@@ -82,7 +82,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
}
}
} // namespace phi
PT_REGISTER_KERNEL(diagonal_grad,
PD_REGISTER_KERNEL(diagonal_grad,
CPU,
ALL_LAYOUT,
phi::DiagonalGradKernel,
......
......@@ -79,7 +79,7 @@ void DiagonalKernel(const Context& dev_ctx,
}
}
} // namespace phi
PT_REGISTER_KERNEL(diagonal,
PD_REGISTER_KERNEL(diagonal,
CPU,
ALL_LAYOUT,
phi::DiagonalKernel,
......
......@@ -19,5 +19,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
// Register the CPU digamma_grad kernel for float and double.
PD_REGISTER_KERNEL(
    digamma_grad, CPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}
......@@ -19,5 +19,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
// Register the CPU digamma kernel for float and double.
PD_REGISTER_KERNEL(
    digamma, CPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}
......@@ -20,7 +20,7 @@
#include "paddle/phi/common/complex.h"
PT_REGISTER_KERNEL(dot_grad,
PD_REGISTER_KERNEL(dot_grad,
CPU,
ALL_LAYOUT,
phi::DotGradKernel,
......
......@@ -49,7 +49,7 @@ void DotKernel(const Context& dev_ctx,
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;
PT_REGISTER_KERNEL(dot,
PD_REGISTER_KERNEL(dot,
CPU,
ALL_LAYOUT,
phi::DotKernel,
......
......@@ -125,7 +125,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(add_grad,
PD_REGISTER_KERNEL(add_grad,
CPU,
ALL_LAYOUT,
phi::AddGradKernel,
......@@ -137,7 +137,7 @@ PT_REGISTER_KERNEL(add_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(add_double_grad,
PD_REGISTER_KERNEL(add_double_grad,
CPU,
ALL_LAYOUT,
phi::AddDoubleGradKernel,
......@@ -149,7 +149,7 @@ PT_REGISTER_KERNEL(add_double_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(add_triple_grad,
PD_REGISTER_KERNEL(add_triple_grad,
CPU,
ALL_LAYOUT,
phi::AddTripleGradKernel,
......@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(subtract_grad,
PD_REGISTER_KERNEL(subtract_grad,
CPU,
ALL_LAYOUT,
phi::SubtractGradKernel,
......@@ -173,7 +173,7 @@ PT_REGISTER_KERNEL(subtract_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(subtract_double_grad,
PD_REGISTER_KERNEL(subtract_double_grad,
CPU,
ALL_LAYOUT,
phi::SubtractDoubleGradKernel,
......
......@@ -19,7 +19,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
PT_REGISTER_KERNEL(expand_grad,
PD_REGISTER_KERNEL(expand_grad,
CPU,
ALL_LAYOUT,
phi::ExpandGradKernel,
......
......@@ -19,7 +19,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/expand_kernel_impl.h"
PT_REGISTER_KERNEL(expand,
PD_REGISTER_KERNEL(expand,
CPU,
ALL_LAYOUT,
phi::ExpandKernel,
......
......@@ -73,7 +73,7 @@ void FullLikeKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(full,
PD_REGISTER_KERNEL(full,
CPU,
ALL_LAYOUT,
phi::FullKernel,
......@@ -89,7 +89,7 @@ PT_REGISTER_KERNEL(full,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(full_like,
PD_REGISTER_KERNEL(full_like,
CPU,
ALL_LAYOUT,
phi::FullLikeKernel,
......
......@@ -77,7 +77,7 @@ void HistogramKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(histogram,
PD_REGISTER_KERNEL(histogram,
CPU,
ALL_LAYOUT,
phi::HistogramKernel,
......
......@@ -17,6 +17,6 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
// Register the CPU huber_loss_grad kernel for float and double.
PD_REGISTER_KERNEL(
    huber_loss_grad, CPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
}
......@@ -17,5 +17,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
// Register the CPU huber_loss kernel for float and double.
PD_REGISTER_KERNEL(
    huber_loss, CPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
......@@ -17,5 +17,5 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
// Register the CPU lerp_grad kernel for float and double.
PD_REGISTER_KERNEL(
    lerp_grad, CPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}
......@@ -17,4 +17,4 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
// Register the CPU lerp kernel for float and double.
PD_REGISTER_KERNEL(lerp, CPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
......@@ -43,7 +43,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(masked_select_grad,
PD_REGISTER_KERNEL(masked_select_grad,
CPU,
ALL_LAYOUT,
phi::MaskedSelectGradKernel,
......
......@@ -61,7 +61,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(masked_select,
PD_REGISTER_KERNEL(masked_select,
CPU,
ALL_LAYOUT,
phi::MaskedSelectKernel,
......
......@@ -118,7 +118,7 @@ using complex128 = ::phi::dtype::complex<double>;
// NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
// using bfloat16 = ::phi::dtype::bfloat16;
PT_REGISTER_KERNEL(add_raw,
PD_REGISTER_KERNEL(add_raw,
CPU,
ALL_LAYOUT,
phi::AddRawKernel,
......@@ -129,7 +129,7 @@ PT_REGISTER_KERNEL(add_raw,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(subtract_raw,
PD_REGISTER_KERNEL(subtract_raw,
CPU,
ALL_LAYOUT,
phi::SubtractRawKernel,
......@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL(subtract_raw,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(divide_raw,
PD_REGISTER_KERNEL(divide_raw,
CPU,
ALL_LAYOUT,
phi::DivideRawKernel,
......@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL(divide_raw,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(multiply_raw,
PD_REGISTER_KERNEL(multiply_raw,
CPU,
ALL_LAYOUT,
phi::MultiplyRawKernel,
......@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(multiply_raw,
bool,
complex64,
complex128) {}
PT_REGISTER_KERNEL(sum_raw,
PD_REGISTER_KERNEL(sum_raw,
CPU,
ALL_LAYOUT,
phi::SumRawKernel,
......@@ -176,5 +176,5 @@ PT_REGISTER_KERNEL(sum_raw,
complex128) {
kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}
// Register the CPU mean_raw kernel for float, double and bool.
PD_REGISTER_KERNEL(
    mean_raw, CPU, ALL_LAYOUT, phi::MeanRawKernel, float, double, bool) {}
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
PT_REGISTER_KERNEL(matmul_grad,
PD_REGISTER_KERNEL(matmul_grad,
CPU,
ALL_LAYOUT,
phi::MatmulGradKernel,
......@@ -28,7 +28,7 @@ PT_REGISTER_KERNEL(matmul_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(matmul_double_grad,
PD_REGISTER_KERNEL(matmul_double_grad,
CPU,
ALL_LAYOUT,
phi::MatmulDoubleGradKernel,
......@@ -37,7 +37,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(matmul_triple_grad,
PD_REGISTER_KERNEL(matmul_triple_grad,
CPU,
ALL_LAYOUT,
phi::MatmulTripleGradKernel,
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/phi/common/complex.h"
#include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
PT_REGISTER_KERNEL(matmul,
PD_REGISTER_KERNEL(matmul,
CPU,
ALL_LAYOUT,
phi::MatmulKernel,
......
......@@ -83,5 +83,5 @@ void NormGradKernel(const Context& ctx,
} // namespace phi
// Register the CPU norm_grad kernel for float and double.
PD_REGISTER_KERNEL(
    norm_grad, CPU, ALL_LAYOUT, phi::NormGradKernel, float, double) {}
......@@ -76,4 +76,4 @@ void NormKernel(const Context& ctx,
} // namespace phi
// Register the CPU norm kernel for float and double.
PD_REGISTER_KERNEL(norm, CPU, ALL_LAYOUT, phi::NormKernel, float, double) {}
......@@ -51,7 +51,7 @@ void ScaleKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(scale,
PD_REGISTER_KERNEL(scale,
CPU,
ALL_LAYOUT,
phi::ScaleKernel,
......
......@@ -21,4 +21,4 @@ limitations under the License. */
// See Note [ Why still include the fluid headers? ]
#include "paddle/phi/common/bfloat16.h"
// Register the CPU sign kernel for float and double.
PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}
......@@ -60,7 +60,7 @@ void SplitKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(split,
PD_REGISTER_KERNEL(split,
CPU,
ALL_LAYOUT,
phi::SplitKernel,
......
......@@ -18,7 +18,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h"
PT_REGISTER_KERNEL(trace_grad,
PD_REGISTER_KERNEL(trace_grad,
CPU,
ALL_LAYOUT,
phi::TraceGradKernel,
......
......@@ -45,7 +45,7 @@ void TraceKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(trace,
PD_REGISTER_KERNEL(trace,
CPU,
ALL_LAYOUT,
phi::TraceKernel,
......
......@@ -30,7 +30,7 @@ void TruncGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(trunc_grad,
PD_REGISTER_KERNEL(trunc_grad,
CPU,
ALL_LAYOUT,
phi::TruncGradKernel,
......
......@@ -35,5 +35,5 @@ void TruncKernel(const Context& dev_ctx,
} // namespace phi
// Register the CPU trunc kernel for float, double, int and int64_t.
PD_REGISTER_KERNEL(
    trunc, CPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
......@@ -38,7 +38,7 @@ void EmptyLikeKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(empty,
PD_REGISTER_KERNEL(empty,
CPU,
ALL_LAYOUT,
phi::EmptyKernel,
......@@ -54,7 +54,7 @@ PT_REGISTER_KERNEL(empty,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(empty_like,
PD_REGISTER_KERNEL(empty_like,
CPU,
ALL_LAYOUT,
phi::EmptyLikeKernel,
......@@ -71,7 +71,7 @@ PT_REGISTER_KERNEL(empty_like,
phi::dtype::complex<double>) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(empty,
PD_REGISTER_KERNEL(empty,
GPU,
ALL_LAYOUT,
phi::EmptyKernel,
......@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(empty,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(empty_like,
PD_REGISTER_KERNEL(empty_like,
GPU,
ALL_LAYOUT,
phi::EmptyLikeKernel,
......
......@@ -32,7 +32,7 @@ void FlattenGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(flatten_grad,
PD_REGISTER_KERNEL(flatten_grad,
CPU,
ALL_LAYOUT,
phi::FlattenGradKernel,
......@@ -44,7 +44,7 @@ PT_REGISTER_KERNEL(flatten_grad,
int64_t) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(flatten_grad,
PD_REGISTER_KERNEL(flatten_grad,
GPU,
ALL_LAYOUT,
phi::FlattenGradKernel,
......@@ -59,7 +59,7 @@ PT_REGISTER_KERNEL(flatten_grad,
#endif
#ifdef PADDLE_WITH_XPU
PT_REGISTER_KERNEL(flatten_grad,
PD_REGISTER_KERNEL(flatten_grad,
XPU,
ALL_LAYOUT,
phi::FlattenGradKernel,
......
......@@ -48,7 +48,7 @@ void FlattenWithXShape(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(flatten,
PD_REGISTER_KERNEL(flatten,
CPU,
ALL_LAYOUT,
phi::FlattenKernel,
......@@ -60,7 +60,7 @@ PT_REGISTER_KERNEL(flatten,
int,
int64_t) {}
PT_REGISTER_KERNEL(flatten_with_xshape,
PD_REGISTER_KERNEL(flatten_with_xshape,
CPU,
ALL_LAYOUT,
phi::FlattenWithXShape,
......@@ -73,7 +73,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
int64_t) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(flatten,
PD_REGISTER_KERNEL(flatten,
GPU,
ALL_LAYOUT,
phi::FlattenKernel,
......@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(flatten,
int,
int64_t) {}
PT_REGISTER_KERNEL(flatten_with_xshape,
PD_REGISTER_KERNEL(flatten_with_xshape,
GPU,
ALL_LAYOUT,
phi::FlattenWithXShape,
......@@ -101,7 +101,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
#endif
#ifdef PADDLE_WITH_XPU
PT_REGISTER_KERNEL(flatten,
PD_REGISTER_KERNEL(flatten,
XPU,
ALL_LAYOUT,
phi::FlattenKernel,
......@@ -112,7 +112,7 @@ PT_REGISTER_KERNEL(flatten,
int,
int64_t) {}
PT_REGISTER_KERNEL(flatten_with_xshape,
PD_REGISTER_KERNEL(flatten_with_xshape,
XPU,
ALL_LAYOUT,
phi::FlattenWithXShape,
......
......@@ -20,7 +20,7 @@
using phi::dtype::complex;
PT_REGISTER_KERNEL(abs_grad,
PD_REGISTER_KERNEL(abs_grad,
GPU,
ALL_LAYOUT,
phi::AbsGradKernel,
......@@ -31,7 +31,7 @@ PT_REGISTER_KERNEL(abs_grad,
phi::dtype::float16,
complex<float>,
complex<double>) {}
PT_REGISTER_KERNEL(abs_double_grad,
PD_REGISTER_KERNEL(abs_double_grad,
GPU,
ALL_LAYOUT,
phi::AbsDoubleGradKernel,
......
......@@ -52,7 +52,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
} // namespace phi
PT_REGISTER_KERNEL(abs,
PD_REGISTER_KERNEL(abs,
GPU,
ALL_LAYOUT,
phi::AbsKernel,
......
......@@ -73,5 +73,5 @@ void BernoulliKernel(const Context& ctx,
} // namespace phi
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
bernoulli, GPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}
......@@ -61,7 +61,7 @@ void CastKernel(const Context& dev_ctx,
} // namespace phi
#define PTEN_REGISTER_CAST_CUDA_BASE_TYPE(op_name, ...) \
PT_REGISTER_KERNEL(cast, \
PD_REGISTER_KERNEL(cast, \
GPU, \
ALL_LAYOUT, \
phi::CastKernel, \
......
......@@ -21,7 +21,7 @@
// See Note [ Why still include the fluid headers? ]
#include "paddle/phi/common/complex.h"
PT_REGISTER_KERNEL(conj,
PD_REGISTER_KERNEL(conj,
GPU,
ALL_LAYOUT,
phi::ConjKernel,
......
......@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(concat,
PD_REGISTER_KERNEL(concat,
GPU,
ALL_LAYOUT,
phi::ConcatKernel,
......
......@@ -207,5 +207,5 @@ void Copy(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_GENERAL_KERNEL(
PD_REGISTER_GENERAL_KERNEL(
copy, GPU, ALL_LAYOUT, phi::Copy<phi::GPUContext>, ALL_DTYPE) {}
......@@ -158,7 +158,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
}
}
} // namespace phi
PT_REGISTER_KERNEL(diagonal_grad,
PD_REGISTER_KERNEL(diagonal_grad,
GPU,
ALL_LAYOUT,
phi::DiagonalGradKernel,
......
......@@ -154,7 +154,7 @@ void DiagonalKernel(const Context& dev_ctx,
}
} // namespace phi
PT_REGISTER_KERNEL(diagonal,
PD_REGISTER_KERNEL(diagonal,
GPU,
ALL_LAYOUT,
phi::DiagonalKernel,
......
......@@ -18,5 +18,5 @@
#include "paddle/phi/kernels/digamma_grad_kernel.h"
#include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
digamma_grad, GPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}
......@@ -19,5 +19,5 @@
#include "paddle/phi/kernels/digamma_kernel.h"
#include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
digamma, GPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/phi/common/complex.h"
PT_REGISTER_KERNEL(dot_grad,
PD_REGISTER_KERNEL(dot_grad,
GPU,
ALL_LAYOUT,
phi::DotGradKernel,
......
......@@ -52,7 +52,7 @@ void DotKernel(const Context& dev_ctx,
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;
PT_REGISTER_KERNEL(dot,
PD_REGISTER_KERNEL(dot,
GPU,
ALL_LAYOUT,
phi::DotKernel,
......
......@@ -119,7 +119,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(add_grad,
PD_REGISTER_KERNEL(add_grad,
GPU,
ALL_LAYOUT,
phi::AddGradKernel,
......@@ -131,7 +131,7 @@ PT_REGISTER_KERNEL(add_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(add_double_grad,
PD_REGISTER_KERNEL(add_double_grad,
GPU,
ALL_LAYOUT,
phi::AddDoubleGradKernel,
......@@ -143,7 +143,7 @@ PT_REGISTER_KERNEL(add_double_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(add_triple_grad,
PD_REGISTER_KERNEL(add_triple_grad,
GPU,
ALL_LAYOUT,
phi::AddTripleGradKernel,
......@@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(subtract_grad,
PD_REGISTER_KERNEL(subtract_grad,
GPU,
ALL_LAYOUT,
phi::SubtractGradKernel,
......@@ -167,7 +167,7 @@ PT_REGISTER_KERNEL(subtract_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(subtract_double_grad,
PD_REGISTER_KERNEL(subtract_double_grad,
GPU,
ALL_LAYOUT,
phi::SubtractDoubleGradKernel,
......
......@@ -18,7 +18,7 @@
#include "paddle/phi/kernels/expand_grad_kernel.h"
#include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
PT_REGISTER_KERNEL(expand_grad,
PD_REGISTER_KERNEL(expand_grad,
GPU,
ALL_LAYOUT,
phi::ExpandGradKernel,
......
......@@ -19,7 +19,7 @@
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/impl/expand_kernel_impl.h"
PT_REGISTER_KERNEL(expand,
PD_REGISTER_KERNEL(expand,
GPU,
ALL_LAYOUT,
phi::ExpandKernel,
......
......@@ -98,7 +98,7 @@ void FullLikeKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(full,
PD_REGISTER_KERNEL(full,
GPU,
ALL_LAYOUT,
phi::FullKernel,
......@@ -113,7 +113,7 @@ PT_REGISTER_KERNEL(full,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(full_like,
PD_REGISTER_KERNEL(full_like,
GPU,
ALL_LAYOUT,
phi::FullLikeKernel,
......
......@@ -149,7 +149,7 @@ void HistogramKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(histogram,
PD_REGISTER_KERNEL(histogram,
GPU,
ALL_LAYOUT,
phi::HistogramKernel,
......
......@@ -17,6 +17,6 @@
#include "paddle/phi/kernels/huber_loss_grad_kernel.h"
#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
}
......@@ -17,5 +17,5 @@
#include "paddle/phi/kernels/huber_loss_kernel.h"
#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
......@@ -17,5 +17,5 @@
#include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
#include "paddle/phi/kernels/lerp_grad_kernel.h"
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
lerp_grad, GPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}
......@@ -17,4 +17,4 @@
#include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
#include "paddle/phi/kernels/lerp_kernel.h"
PT_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
PD_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
......@@ -96,7 +96,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(masked_select_grad,
PD_REGISTER_KERNEL(masked_select_grad,
GPU,
ALL_LAYOUT,
phi::MaskedSelectGradKernel,
......
......@@ -108,7 +108,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(masked_select,
PD_REGISTER_KERNEL(masked_select,
GPU,
ALL_LAYOUT,
phi::MaskedSelectKernel,
......
......@@ -95,7 +95,7 @@ using float16 = phi::dtype::float16;
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;
PT_REGISTER_KERNEL(add_raw,
PD_REGISTER_KERNEL(add_raw,
GPU,
ALL_LAYOUT,
phi::AddRawKernel,
......@@ -107,7 +107,7 @@ PT_REGISTER_KERNEL(add_raw,
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(subtract_raw,
PD_REGISTER_KERNEL(subtract_raw,
GPU,
ALL_LAYOUT,
phi::SubtractRawKernel,
......@@ -119,7 +119,7 @@ PT_REGISTER_KERNEL(subtract_raw,
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(divide_raw,
PD_REGISTER_KERNEL(divide_raw,
GPU,
ALL_LAYOUT,
phi::DivideRawKernel,
......@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL(divide_raw,
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(multiply_raw,
PD_REGISTER_KERNEL(multiply_raw,
GPU,
ALL_LAYOUT,
phi::MultiplyRawKernel,
......@@ -142,7 +142,7 @@ PT_REGISTER_KERNEL(multiply_raw,
float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(sum_raw,
PD_REGISTER_KERNEL(sum_raw,
GPU,
ALL_LAYOUT,
phi::SumRawKernel,
......@@ -158,7 +158,7 @@ PT_REGISTER_KERNEL(sum_raw,
kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}
PT_REGISTER_KERNEL(mean_raw,
PD_REGISTER_KERNEL(mean_raw,
GPU,
ALL_LAYOUT,
phi::MeanRawKernel,
......
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
PT_REGISTER_KERNEL(matmul_grad,
PD_REGISTER_KERNEL(matmul_grad,
GPU,
ALL_LAYOUT,
phi::MatmulGradKernel,
......@@ -30,7 +30,7 @@ PT_REGISTER_KERNEL(matmul_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(matmul_double_grad,
PD_REGISTER_KERNEL(matmul_double_grad,
GPU,
ALL_LAYOUT,
phi::MatmulDoubleGradKernel,
......@@ -40,7 +40,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(matmul_triple_grad,
PD_REGISTER_KERNEL(matmul_triple_grad,
GPU,
ALL_LAYOUT,
phi::MatmulTripleGradKernel,
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/phi/common/complex.h"
#include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
PT_REGISTER_KERNEL(matmul,
PD_REGISTER_KERNEL(matmul,
GPU,
ALL_LAYOUT,
phi::MatmulKernel,
......
......@@ -111,7 +111,7 @@ void NormGradKernel(const Context& ctx,
} // namespace phi
PT_REGISTER_KERNEL(norm_grad,
PD_REGISTER_KERNEL(norm_grad,
GPU,
ALL_LAYOUT,
phi::NormGradKernel,
......
......@@ -124,7 +124,7 @@ void NormKernel(const Context& ctx,
} // namespace phi
PT_REGISTER_KERNEL(norm,
PD_REGISTER_KERNEL(norm,
GPU,
ALL_LAYOUT,
phi::NormKernel,
......
......@@ -63,7 +63,7 @@ void ScaleKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(scale,
PD_REGISTER_KERNEL(scale,
GPU,
ALL_LAYOUT,
phi::ScaleKernel,
......
......@@ -23,5 +23,5 @@ limitations under the License. */
using float16 = phi::dtype::float16;
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
sign, GPU, ALL_LAYOUT, phi::SignKernel, float, double, float16) {}
......@@ -59,7 +59,7 @@ void SplitKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(split,
PD_REGISTER_KERNEL(split,
GPU,
ALL_LAYOUT,
phi::SplitKernel,
......
......@@ -18,7 +18,7 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h"
PT_REGISTER_KERNEL(trace_grad,
PD_REGISTER_KERNEL(trace_grad,
GPU,
ALL_LAYOUT,
phi::TraceGradKernel,
......
......@@ -44,7 +44,7 @@ void TraceKernel(const Context& ctx,
} // namespace phi
PT_REGISTER_KERNEL(trace,
PD_REGISTER_KERNEL(trace,
GPU,
ALL_LAYOUT,
phi::TraceKernel,
......
......@@ -44,7 +44,7 @@ void TruncGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(trunc_grad,
PD_REGISTER_KERNEL(trunc_grad,
GPU,
ALL_LAYOUT,
phi::TruncGradKernel,
......
......@@ -77,5 +77,5 @@ void TruncKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
trunc, GPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
......@@ -81,10 +81,10 @@ void MultiplyKernel(const Context& dev_ctx,
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;
PT_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
mean, CPU, ALL_LAYOUT, phi::MeanKernel, float, double, bool) {}
PT_REGISTER_KERNEL(sum,
PD_REGISTER_KERNEL(sum,
CPU,
ALL_LAYOUT,
phi::SumKernel,
......@@ -100,7 +100,7 @@ PT_REGISTER_KERNEL(sum,
kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}
PT_REGISTER_KERNEL(add,
PD_REGISTER_KERNEL(add,
CPU,
ALL_LAYOUT,
phi::AddKernel,
......@@ -111,7 +111,7 @@ PT_REGISTER_KERNEL(add,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(subtract,
PD_REGISTER_KERNEL(subtract,
CPU,
ALL_LAYOUT,
phi::SubtractKernel,
......@@ -122,7 +122,7 @@ PT_REGISTER_KERNEL(subtract,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(divide,
PD_REGISTER_KERNEL(divide,
CPU,
ALL_LAYOUT,
phi::DivideKernel,
......@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(divide,
int64_t,
complex64,
complex128) {}
PT_REGISTER_KERNEL(multiply,
PD_REGISTER_KERNEL(multiply,
CPU,
ALL_LAYOUT,
phi::MultiplyKernel,
......@@ -145,7 +145,7 @@ PT_REGISTER_KERNEL(multiply,
complex128) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(mean,
PD_REGISTER_KERNEL(mean,
GPU,
ALL_LAYOUT,
phi::MeanKernel,
......@@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(mean,
int,
int64_t,
phi::dtype::float16) {}
PT_REGISTER_KERNEL(sum,
PD_REGISTER_KERNEL(sum,
GPU,
ALL_LAYOUT,
phi::SumKernel,
......@@ -170,7 +170,7 @@ PT_REGISTER_KERNEL(sum,
complex128) {
kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}
PT_REGISTER_KERNEL(add,
PD_REGISTER_KERNEL(add,
GPU,
ALL_LAYOUT,
phi::AddKernel,
......@@ -182,7 +182,7 @@ PT_REGISTER_KERNEL(add,
phi::dtype::float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(subtract,
PD_REGISTER_KERNEL(subtract,
GPU,
ALL_LAYOUT,
phi::SubtractKernel,
......@@ -194,7 +194,7 @@ PT_REGISTER_KERNEL(subtract,
phi::dtype::float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(divide,
PD_REGISTER_KERNEL(divide,
GPU,
ALL_LAYOUT,
phi::DivideKernel,
......@@ -205,7 +205,7 @@ PT_REGISTER_KERNEL(divide,
phi::dtype::float16,
complex64,
complex128) {}
PT_REGISTER_KERNEL(multiply,
PD_REGISTER_KERNEL(multiply,
GPU,
ALL_LAYOUT,
phi::MultiplyKernel,
......
......@@ -37,24 +37,24 @@ void ReshapeDoubleGradKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_GENERAL_KERNEL(reshape_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_grad,
CPU,
ALL_LAYOUT,
phi::ReshapeGradKernel<phi::CPUContext>,
ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
CPU,
ALL_LAYOUT,
phi::ReshapeDoubleGradKernel<phi::CPUContext>,
ALL_DTYPE) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_GENERAL_KERNEL(reshape_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_grad,
GPU,
ALL_LAYOUT,
phi::ReshapeGradKernel<phi::GPUContext>,
ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
GPU,
ALL_LAYOUT,
phi::ReshapeDoubleGradKernel<phi::GPUContext>,
......@@ -62,12 +62,12 @@ PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
#endif
#ifdef PADDLE_WITH_XPU
PT_REGISTER_GENERAL_KERNEL(reshape_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_grad,
XPU,
ALL_LAYOUT,
phi::ReshapeGradKernel<phi::XPUContext>,
ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_double_grad,
PD_REGISTER_GENERAL_KERNEL(reshape_double_grad,
XPU,
ALL_LAYOUT,
phi::ReshapeDoubleGradKernel<phi::XPUContext>,
......
......@@ -52,18 +52,18 @@ void ReshapeWithXShape(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_GENERAL_KERNEL(
PD_REGISTER_GENERAL_KERNEL(
reshape, CPU, ALL_LAYOUT, phi::ReshapeKernel<phi::CPUContext>, ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
CPU,
ALL_LAYOUT,
phi::ReshapeWithXShape<phi::CPUContext>,
ALL_DTYPE) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_GENERAL_KERNEL(
PD_REGISTER_GENERAL_KERNEL(
reshape, GPU, ALL_LAYOUT, phi::ReshapeKernel<phi::GPUContext>, ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
GPU,
ALL_LAYOUT,
phi::ReshapeWithXShape<phi::GPUContext>,
......@@ -71,9 +71,9 @@ PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
#endif
#ifdef PADDLE_WITH_XPU
PT_REGISTER_GENERAL_KERNEL(
PD_REGISTER_GENERAL_KERNEL(
reshape, XPU, ALL_LAYOUT, phi::ReshapeKernel<phi::XPUContext>, ALL_DTYPE) {}
PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape,
XPU,
ALL_LAYOUT,
phi::ReshapeWithXShape<phi::XPUContext>,
......
......@@ -36,7 +36,7 @@ void FullSR(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(full_sr,
PD_REGISTER_KERNEL(full_sr,
CPU,
ALL_LAYOUT,
phi::FullSR,
......@@ -53,7 +53,7 @@ PT_REGISTER_KERNEL(full_sr,
phi::dtype::complex<double>) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(full_sr,
PD_REGISTER_KERNEL(full_sr,
GPU,
ALL_LAYOUT,
phi::FullSR,
......
......@@ -38,7 +38,7 @@ void ScaleSR(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(scale_sr,
PD_REGISTER_KERNEL(scale_sr,
CPU,
ALL_LAYOUT,
phi::ScaleSR,
......@@ -52,7 +52,7 @@ PT_REGISTER_KERNEL(scale_sr,
int64_t) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_REGISTER_KERNEL(scale_sr,
PD_REGISTER_KERNEL(scale_sr,
GPU,
ALL_LAYOUT,
phi::ScaleSR,
......
......@@ -284,7 +284,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx,
} // namespace sparse
} // namespace phi
PT_REGISTER_KERNEL(dense_to_sparse_coo,
PD_REGISTER_KERNEL(dense_to_sparse_coo,
CPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
......@@ -297,7 +297,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_csr_to_coo,
PD_REGISTER_KERNEL(sparse_csr_to_coo,
CPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
......@@ -310,7 +310,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_coo_to_csr,
PD_REGISTER_KERNEL(sparse_coo_to_csr,
CPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
......@@ -323,7 +323,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr,
int,
int64_t) {}
PT_REGISTER_KERNEL(dense_to_sparse_csr,
PD_REGISTER_KERNEL(dense_to_sparse_csr,
CPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
......@@ -336,7 +336,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_coo_to_dense,
PD_REGISTER_KERNEL(sparse_coo_to_dense,
CPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
......@@ -349,7 +349,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_csr_to_dense,
PD_REGISTER_KERNEL(sparse_csr_to_dense,
CPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
......
......@@ -553,7 +553,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx,
} // namespace sparse
} // namespace phi
PT_REGISTER_KERNEL(dense_to_sparse_coo,
PD_REGISTER_KERNEL(dense_to_sparse_coo,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCooKernel,
......@@ -566,7 +566,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_csr_to_coo,
PD_REGISTER_KERNEL(sparse_csr_to_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToCooKernel,
......@@ -579,7 +579,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_coo_to_csr,
PD_REGISTER_KERNEL(sparse_coo_to_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToCsrKernel,
......@@ -592,7 +592,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr,
int,
int64_t) {}
PT_REGISTER_KERNEL(dense_to_sparse_csr,
PD_REGISTER_KERNEL(dense_to_sparse_csr,
GPU,
ALL_LAYOUT,
phi::sparse::DenseToSparseCsrKernel,
......@@ -605,7 +605,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_coo_to_dense,
PD_REGISTER_KERNEL(sparse_coo_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCooToDenseKernel,
......@@ -618,7 +618,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense,
int,
int64_t) {}
PT_REGISTER_KERNEL(sparse_csr_to_dense,
PD_REGISTER_KERNEL(sparse_csr_to_dense,
GPU,
ALL_LAYOUT,
phi::sparse::SparseCsrToDenseKernel,
......
......@@ -69,7 +69,7 @@ void TransferLayoutKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_GENERAL_KERNEL(pten_transfer_layout,
PD_REGISTER_GENERAL_KERNEL(pten_transfer_layout,
CPU,
ALL_LAYOUT,
phi::TransferLayoutKernel<phi::CPUContext>,
......
......@@ -86,7 +86,7 @@ void CastKernel(const Context& dev_ctx,
}
} // namespace phi
PT_REGISTER_KERNEL(cast,
PD_REGISTER_KERNEL(cast,
XPU,
ALL_LAYOUT,
phi::CastKernel,
......
......@@ -69,5 +69,5 @@ void Copy(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_GENERAL_KERNEL(
PD_REGISTER_GENERAL_KERNEL(
copy, XPU, ALL_LAYOUT, phi::Copy<phi::XPUContext>, ALL_DTYPE) {}
......@@ -116,7 +116,7 @@ void FullLikeKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(full,
PD_REGISTER_KERNEL(full,
XPU,
ALL_LAYOUT,
phi::FullKernel,
......@@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(full,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
PT_REGISTER_KERNEL(full_like,
PD_REGISTER_KERNEL(full_like,
XPU,
ALL_LAYOUT,
phi::FullLikeKernel,
......
......@@ -56,7 +56,7 @@ void ScaleKernel(const Context& dev_ctx,
} // namespace phi
PT_REGISTER_KERNEL(scale,
PD_REGISTER_KERNEL(scale,
XPU,
ALL_LAYOUT,
phi::ScaleKernel,
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册