diff --git a/cmake/pten.cmake b/cmake/pten.cmake
index 6049f6e21e5662a8b45e6f77898f10c2220a70b5..9a3552efce8e12d21f1c79b70ac649c43af2c085 100644
--- a/cmake/pten.cmake
+++ b/cmake/pten.cmake
@@ -58,26 +58,26 @@ endfunction()
 function(kernel_declare TARGET_LIST)
   foreach(kernel_path ${TARGET_LIST})
     file(READ ${kernel_path} kernel_impl)
-    # TODO(chenweihang): rename PT_REGISTER_KERNEL to PT_REGISTER_KERNEL
+    # TODO(chenweihang): rename PD_REGISTER_KERNEL to PD_REGISTER_KERNEL
     # NOTE(chenweihang): now we don't recommend to use digit in kernel name
-    string(REGEX MATCH "(PT_REGISTER_KERNEL|PT_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
+    string(REGEX MATCH "(PD_REGISTER_KERNEL|PD_REGISTER_GENERAL_KERNEL)\\([ \t\r\n]*[a-z0-9_]*," first_registry "${kernel_impl}")
     if (NOT first_registry STREQUAL "")
       # parse the first kernel name
-      string(REPLACE "PT_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
-      string(REPLACE "PT_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
+      string(REPLACE "PD_REGISTER_KERNEL(" "" kernel_name "${first_registry}")
+      string(REPLACE "PD_REGISTER_GENERAL_KERNEL(" "" kernel_name "${kernel_name}")
       string(REPLACE "," "" kernel_name "${kernel_name}")
       string(REGEX REPLACE "[ \t\r\n]+" "" kernel_name "${kernel_name}")
       # append kernel declare into declarations.h
       # TODO(chenweihang): default declare ALL_LAYOUT for each kernel
       if (${kernel_path} MATCHES "./cpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
       elseif (${kernel_path} MATCHES "./gpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, GPU, ALL_LAYOUT);\n")
       elseif (${kernel_path} MATCHES "./xpu\/")
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, XPU, ALL_LAYOUT);\n")
       else ()
         # deal with device independent kernel, now we use CPU temporaary
-        file(APPEND ${kernel_declare_file} "PT_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
+        file(APPEND ${kernel_declare_file} "PD_DECLARE_KERNEL(${kernel_name}, CPU, ALL_LAYOUT);\n")
       endif()
     endif()
   endforeach()
@@ -285,9 +285,9 @@ endfunction()
 function(append_op_util_declare TARGET)
   file(READ ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET} target_content)
-  string(REGEX MATCH "(PT_REGISTER_BASE_KERNEL_NAME|PT_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
-  string(REPLACE "PT_REGISTER_ARG_MAPPING_FN" "PT_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
-  string(REPLACE "PT_REGISTER_BASE_KERNEL_NAME" "PT_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
+  string(REGEX MATCH "(PD_REGISTER_BASE_KERNEL_NAME|PD_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
+  string(REPLACE "PD_REGISTER_ARG_MAPPING_FN" "PD_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
+  string(REPLACE "PD_REGISTER_BASE_KERNEL_NAME" "PD_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
   string(APPEND util_declare ");\n")
   file(APPEND ${op_utils_header} "${util_declare}")
 endfunction()
diff --git a/paddle/fluid/framework/infershape_utils_test.cc b/paddle/fluid/framework/infershape_utils_test.cc
index 592e787109d18c45eb872fb720954ed29b073ea4..53dcc19fcbae88ab5ccfcc498037327946029927 100644
--- a/paddle/fluid/framework/infershape_utils_test.cc
+++ b/paddle/fluid/framework/infershape_utils_test.cc
@@ -118,7 +118,7 @@ REGISTER_OPERATOR(infer_shape_utils_test,
                   paddle::framework::InferShapeUtilsTestOpMaker,
                   InferShapeUtilsTestInferShapeFunctor);
 
-PT_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
+PD_REGISTER_KERNEL(infer_shape_utils_test, CPU, ALL_LAYOUT,
                    paddle::framework::InferShapeUtilsTestKernel, int) {}
 
 TEST(InferShapeUtilsTest, ALL) {
diff --git a/paddle/phi/api/ext/op_kernel_info.h b/paddle/phi/api/ext/op_kernel_info.h
index b52b0abe9e745d7a559a4f4752bb9a77e4137245..b3adbe9d18b966f6e9ec7f7e26408b8d722ed5bc 100644
--- a/paddle/phi/api/ext/op_kernel_info.h
+++ b/paddle/phi/api/ext/op_kernel_info.h
@@ -630,16 +630,16 @@ class PADDLE_API OpKernelInfoBuilder {
 };
 /////////////////////// Custom kernel register API /////////////////////////
 // For inference: compile directly with framework
-// Call after PD_REGISTER_KERNEL(...)
+// Call after PD_REGISTER_BUILTIN_KERNEL(...)
 void RegisterAllCustomKernel();
 
 //////////////// Custom kernel register macro /////////////////////
 // Refer to paddle/phi/core/kernel_registry.h, we can not use
-// PT_REGISTER_KERNEL directly, common macros and functions are
+// PD_REGISTER_KERNEL directly, common macros and functions are
 // not ready for custom kernel now.
 // Difference: custom_kernel stores all kernels' info into global
 // g_custom_kernel_info_map before loading and registering into
-// pten kernel management. Only providing PD_REGISTER_KERNEL which
+// pten kernel management. Only providing PD_REGISTER_BUILTIN_KERNEL which
 // supports 2 template arguments.
 
 #define PD_BACKEND(arg__) phi::Backend::arg__
@@ -666,11 +666,12 @@ void RegisterAllCustomKernel();
 #define PD_ID __LINE__
 #endif
 
-#define PD_REGISTER_KERNEL(kernel_name, backend, layout, func, cpp_dtype, ...) \
-  STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      _reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PD_REGISTER_KERNEL must be called in global namespace."); \
-  _PD_REGISTER_2TA_KERNEL( \
+#define PD_REGISTER_BUILTIN_KERNEL( \
+    kernel_name, backend, layout, func, cpp_dtype, ...) \
+  STATIC_ASSERT_GLOBAL_NAMESPACE( \
+      _reg_custom_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_BUILTIN_KERNEL must be called in global namespace."); \
+  _PD_REGISTER_2TA_KERNEL( \
       kernel_name, backend, layout, func, cpp_dtype, ##__VA_ARGS__)
 
 // WIN32 is not supported
diff --git a/paddle/phi/api/lib/api_declare.h b/paddle/phi/api/lib/api_declare.h
index 650161a933a8cb9ba02d1385eef3c7bd0dc09a08..26408290bd325e60952f8f88d413b90451544044 100644
--- a/paddle/phi/api/lib/api_declare.h
+++ b/paddle/phi/api/lib/api_declare.h
@@ -17,6 +17,6 @@ limitations under the License. */
 
 // api symbols declare, remove in the future
 #include "paddle/phi/api/lib/api_registry.h"
-PT_DECLARE_API(Math);
-PT_DECLARE_API(Utils);
-PT_DECLARE_API(SparseApi);
+PD_DECLARE_API(Math);
+PD_DECLARE_API(Utils);
+PD_DECLARE_API(SparseApi);
diff --git a/paddle/phi/api/lib/api_registry.h b/paddle/phi/api/lib/api_registry.h
index 2812bede8e09ba99577efd69d928d89e8431cf25..3783620ea449b46ab17ae1ac7d9f7e80ef08cae9 100644
--- a/paddle/phi/api/lib/api_registry.h
+++ b/paddle/phi/api/lib/api_registry.h
@@ -36,10 +36,10 @@ namespace experimental {
 */
 
 // use to declare symbol
-#define PT_REGISTER_API(name) \
+#define PD_REGISTER_API(name) \
   PADDLE_API int RegisterSymbolsFor##name() { return 0; }
 
-#define PT_DECLARE_API(name) \
+#define PD_DECLARE_API(name) \
   extern PADDLE_API int RegisterSymbolsFor##name(); \
   UNUSED static int use_pten_api_##name = RegisterSymbolsFor##name()
diff --git a/paddle/phi/api/lib/manual_api.cc b/paddle/phi/api/lib/manual_api.cc
index e0da15eac39b79f3b8ffde3f4c068d02ce28ae6c..7bd4711cc3f308173ce6fd12225faa46f516cb91 100644
--- a/paddle/phi/api/lib/manual_api.cc
+++ b/paddle/phi/api/lib/manual_api.cc
@@ -27,15 +27,15 @@ limitations under the License. */
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/infermeta/unary.h"
 
-PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
 #endif
 
 #ifdef PADDLE_WITH_XPU
-PT_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT);
 #endif
 
 namespace paddle {
@@ -147,4 +147,4 @@ PADDLE_API std::vector split(const Tensor& x,
 } // namespace experimental
 } // namespace paddle
 
-PT_REGISTER_API(Utils);
+PD_REGISTER_API(Utils);
diff --git a/paddle/phi/api/lib/op_kernel_info.cc b/paddle/phi/api/lib/op_kernel_info.cc
index 78b4955f321da0a3b37cc766287806acd37f37ac..c2aef8288dae1ae87a952c19f91495bb6dd40b0c 100644
--- a/paddle/phi/api/lib/op_kernel_info.cc
+++ b/paddle/phi/api/lib/op_kernel_info.cc
@@ -86,7 +86,7 @@ OpKernelInfoBuilder& OpKernelInfoBuilder::ArgsDef(CustomKernelArgsDefFn func) {
 
 /////////////////////// Op register API /////////////////////////
 // For inference: compile directly with framework
-// Call after PD_REGISTER_KERNEL(...)
+// Call after PD_REGISTER_BUILTIN_KERNEL(...)
 void RegisterAllCustomKernel() {
   auto& op_kernel_info_map = OpKernelInfoMap::Instance();
   framework::RegisterKernelWithMetaInfoMap(op_kernel_info_map);
diff --git a/paddle/phi/api/lib/sparse_api.cc b/paddle/phi/api/lib/sparse_api.cc
index 5a22d617492d2121de3acdb2e10bcaaa60f78a24..cc90c2b819daefd725a71f2787d75e42e37899bd 100644
--- a/paddle/phi/api/lib/sparse_api.cc
+++ b/paddle/phi/api/lib/sparse_api.cc
@@ -22,20 +22,20 @@ limitations under the License. */
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/infermeta/unary.h"
 
-PT_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
-PT_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
 #endif
 
 namespace paddle {
@@ -228,4 +228,4 @@ PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) {
 } // namespace experimental
 } // namespace paddle
 
-PT_REGISTER_API(SparseApi);
+PD_REGISTER_API(SparseApi);
diff --git a/paddle/phi/common/backend.h b/paddle/phi/common/backend.h
index 62692fb9475dac6dbd7df6f458dec62facf4d60d..9a2ec093119fdbebfd2ea0eba0952b2236ab12e6 100644
--- a/paddle/phi/common/backend.h
+++ b/paddle/phi/common/backend.h
@@ -71,17 +71,17 @@ enum class Backend : uint8_t {
  * Of course, we have also considered solving this problem through different
  * named macros, for example, if we define
  *
- *   PT_REGISTER_KERNEL_FOR_ALL_BACKEND
+ *   PD_REGISTER_KERNEL_FOR_ALL_BACKEND
  *
  * Based on this design pattern, the dtype and layout also have the same
  * requirements, this cause we need to define a series of macros
  *
- *   PT_REGISTER_KERNEL_FOR_ALL_DTYPE
- *   PT_REGISTER_KERNEL_FOR_ALL_LAYOUT
- *   PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT
- *   PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_DTYPE
- *   PT_REGISTER_KERNEL_FOR_ALL_LAYOUT_AND_DTYPE
- *   PT_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT_AND_DTYPE
+ *   PD_REGISTER_KERNEL_FOR_ALL_DTYPE
+ *   PD_REGISTER_KERNEL_FOR_ALL_LAYOUT
+ *   PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT
+ *   PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_DTYPE
+ *   PD_REGISTER_KERNEL_FOR_ALL_LAYOUT_AND_DTYPE
+ *   PD_REGISTER_KERNEL_FOR_ALL_BACKEND_AND_LAYOUT_AND_DTYPE
 *
 * It makes the system of registering macros more complicated, we think
 * this is not a simple design, so we still adopt the design of providing
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 5c0c440d8942c83d10bfe092b3fc1782944f1719..ec810d4e16340862faaabe0799e19245551b44c3 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -164,34 +164,34 @@ struct ArgumentMappingFnRegistrar {
   }
 };
 
-#define PT_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name) \
+#define PD_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_base_kernel_name_ns_check_##op_type, \
-      "PT_REGISTER_BASE_KERNEL_NAME must be called in global namespace."); \
+      PD_REGISTER_base_kernel_name_ns_check_##op_type, \
+      "PD_REGISTER_BASE_KERNEL_NAME must be called in global namespace."); \
   static const ::phi::BaseKernelNameRegistrar \
       __registrar_base_kernel_name_for_##op_type(#op_type, #base_kernel_name); \
   int TouchBaseKernelNameSymbol_##op_type() { return 0; }
 
-#define PT_DECLARE_BASE_KERNEL_NAME(op_type) \
+#define PD_DECLARE_BASE_KERNEL_NAME(op_type) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_ai_name_ns_check_##op_type, \
-      "PT_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
+      PD_DECLARE_ai_name_ns_check_##op_type, \
+      "PD_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
   extern int TouchBaseKernelNameSymbol_##op_type(); \
   UNUSED static int __declare_base_kernel_name_symbol_for_##op_type = \
       TouchBaseKernelNameSymbol_##op_type()
 
-#define PT_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn) \
+#define PD_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_arg_map_fn_ns_check_##op_type, \
-      "PT_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
+      PD_REGISTER_arg_map_fn_ns_check_##op_type, \
+      "PD_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
   static const ::phi::ArgumentMappingFnRegistrar \
       __registrar_arg_map_fn_for_##op_type(#op_type, arg_mapping_fn); \
   int TouchArgumentMappingFnSymbol_##op_type() { return 0; }
 
-#define PT_DECLARE_ARG_MAPPING_FN(op_type) \
+#define PD_DECLARE_ARG_MAPPING_FN(op_type) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_arg_map_fn_ns_check_##op_type, \
-      "PT_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
+      PD_DECLARE_arg_map_fn_ns_check_##op_type, \
+      "PD_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
   extern int TouchArgumentMappingFnSymbol_##op_type(); \
   UNUSED static int __declare_arg_map_fn_symbol_for_##op_type = \
       TouchArgumentMappingFnSymbol_##op_type()
diff --git a/paddle/phi/core/infermeta_utils.h b/paddle/phi/core/infermeta_utils.h
index 2b98ab22bcdbd43a1863c2d59d93e31c510368b8..1b8cfea130d4900b331f24526332b80903f55e19 100644
--- a/paddle/phi/core/infermeta_utils.h
+++ b/paddle/phi/core/infermeta_utils.h
@@ -282,10 +282,10 @@ struct InferMetaFnRegistrar {
   }
 };
 
-#define PT_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
+#define PD_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_infer_meta_fn_ns_check_##kernel_name_prefix, \
-      "PT_REGISTER_INFER_META_FN must be called in global namespace."); \
+      PD_REGISTER_infer_meta_fn_ns_check_##kernel_name_prefix, \
+      "PD_REGISTER_INFER_META_FN must be called in global namespace."); \
   static const ::phi::InferMetaFnRegistrar \
       __registrar_arg_map_fn_for_##kernel_name_prefix( \
           #kernel_name_prefix, PT_INFER_META(variadic_infer_meta_fn))
diff --git a/paddle/phi/core/kernel_registry.h b/paddle/phi/core/kernel_registry.h
index a93c9a282606894f94724252f93116b6fb232d70..4603f4123acd020885387a0bcd0a5d5a4de15350 100644
--- a/paddle/phi/core/kernel_registry.h
+++ b/paddle/phi/core/kernel_registry.h
@@ -234,7 +234,7 @@ struct KernelRegistrar {
 #define _PT_ARG_N(args) _PT_ARG_N_EXPAND args
 #define _PT_RESQ_N() 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
 
-/** PT_REGISTER_KERNEL
+/** PD_REGISTER_KERNEL
  *
  * The most frequently used kernel registration macro, used for kernel
  * registration with only data type as template parameter, and the function
@@ -243,8 +243,8 @@ struct KernelRegistrar {
 *
 * Note: `2TA` means `2 template argument`
 */
-#define PT_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
+#define PD_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
+  _PD_REGISTER_KERNEL(::phi::RegType::BUILTIN, \
                       kernel_name, \
                       backend, \
                       ::phi::backend##Context, \
@@ -252,12 +252,12 @@ struct KernelRegistrar {
                       meta_kernel_fn, \
                       __VA_ARGS__)
 
-#define _PT_REGISTER_KERNEL( \
+#define _PD_REGISTER_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_REGISTER_KERNEL must be called in global namespace."); \
-  PT_EXPAND(_PT_REGISTER_2TA_KERNEL(reg_type, \
+      PD_REGISTER_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_KERNEL must be called in global namespace."); \
+  PT_EXPAND(_PD_REGISTER_2TA_KERNEL(reg_type, \
                                     kernel_name, \
                                     backend, \
                                     context, \
@@ -266,7 +266,7 @@ struct KernelRegistrar {
                                     __VA_ARGS__))
 
 #ifndef _WIN32
-#define _PT_REGISTER_2TA_KERNEL( \
+#define _PD_REGISTER_2TA_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   PT_KERNEL_INSTANTIATION(meta_kernel_fn, backend, context, __VA_ARGS__); \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
@@ -295,7 +295,7 @@ struct KernelRegistrar {
 *
 * And msvc can work without template instantiation
 */
-#define _PT_REGISTER_2TA_KERNEL( \
+#define _PD_REGISTER_2TA_KERNEL( \
     reg_type, kernel_name, backend, context, layout, meta_kernel_fn, ...) \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
@@ -909,27 +909,27 @@ struct KernelRegistrar {
                                           args_def_fn, \
                                           meta_kernel_fn, \
                                           __VA_ARGS__))
-/** PT_REGISTER_GENERAL_KERNEL
+/** PD_REGISTER_GENERAL_KERNEL
  *
  * Basic Kernel register marco, used to register a instantiated kernel function
  * with one template argument.
  */
-#define PT_REGISTER_GENERAL_KERNEL( \
     kernel_name, backend, layout, kernel_fn, dtype) \
-  _PT_REGISTER_GENERAL_KERNEL( \
+#define PD_REGISTER_GENERAL_KERNEL( \
     kernel_name, backend, layout, kernel_fn, dtype) \
+  _PD_REGISTER_GENERAL_KERNEL( \
       ::phi::RegType::BUILTIN, kernel_name, backend, layout, kernel_fn, dtype)
 
-#define _PT_REGISTER_GENERAL_KERNEL( \
+#define _PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_register_no_t_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_REGISTER_NO_TEMPLATE_KERNEL must be called in global namespace."); \
-  __PT_REGISTER_GENERAL_KERNEL( \
+      PD_REGISTER_no_t_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_REGISTER_NO_TEMPLATE_KERNEL must be called in global namespace."); \
+  __PD_REGISTER_GENERAL_KERNEL( \
       reg_type, kernel_name, backend, layout, kernel_fn, dtype)
 
 #ifndef _WIN32
-#define __PT_REGISTER_GENERAL_KERNEL( \
+#define __PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   template decltype(kernel_fn) kernel_fn; \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
@@ -950,7 +950,7 @@ struct KernelRegistrar {
   void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
 #else
-#define __PT_REGISTER_GENERAL_KERNEL( \
+#define __PD_REGISTER_GENERAL_KERNEL( \
     reg_type, kernel_name, backend, layout, kernel_fn, dtype) \
   static void __PT_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); \
@@ -971,42 +971,43 @@ struct KernelRegistrar {
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
 #endif
 
-/** PT_DECLARE_KERNEL
+/** PD_DECLARE_KERNEL
  *
  * Used to export the symbols of the file where the kernel is located,
 * to avoid being removed by linker
 */
-#define PT_DECLARE_KERNEL(kernel_name, backend, layout) \
+#define PD_DECLARE_KERNEL(kernel_name, backend, layout) \
   PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
-      pt_declare_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
-      "PT_DECLARE_KERNEL must be called in global namespace."); \
+      PD_DECLARE_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \
+      "PD_DECLARE_KERNEL must be called in global namespace."); \
   extern int TouchKernelSymbolFor_##kernel_name##_##backend##_##layout(); \
   UNUSED static int \
       __declare_kernel_symbol_for_##kernel_name##_##backend##_##layout = \
          TouchKernelSymbolFor_##kernel_name##_##backend##_##layout()
 
-/** PD_REGISTER_KERNEL
+/** PD_REGISTER_BUILTIN_KERNEL
  *
  * Used to register kernels for built-in backends.
 * Support CPU GPU XPU.
 */
-#define PD_REGISTER_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
-                      kernel_name, \
-                      backend, \
-                      ::phi::backend##Context, \
-                      layout, \
-                      meta_kernel_fn, \
+#define PD_REGISTER_BUILTIN_KERNEL( \
+    kernel_name, backend, layout, meta_kernel_fn, ...) \
+  _PD_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
+                      kernel_name, \
+                      backend, \
+                      ::phi::backend##Context, \
+                      layout, \
+                      meta_kernel_fn, \
                       __VA_ARGS__)
 
-/** PD_REGISTER_CUSTOM_KERNEL
+/** PD_REGISTER_PLUGIN_KERNEL
  *
  * Used to register kernels for plug-in backends.
 * Support user-defined backend such as 'Ascend910'.
 */
-#define PD_REGISTER_CUSTOM_KERNEL( \
+#define PD_REGISTER_PLUGIN_KERNEL( \
     kernel_name, backend, layout, meta_kernel_fn, ...) \
-  _PT_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
+  _PD_REGISTER_KERNEL(::phi::RegType::PLUGIN, \
                       kernel_name, \
                       backend, \
                       ::phi::CustomContext, \
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 4b13545e038f0970c5ed60ca3c4fefaeb6edba58..66a91e0ca53e82488aa8097bd4707293d8c02425 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -539,5 +539,5 @@ void TraceInferMeta(
 
 } // namespace phi
 
-PT_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
-PT_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);
+PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
+PD_REGISTER_INFER_META_FN(split, phi::SplitInferMeta);
diff --git a/paddle/phi/kernels/cpu/abs_grad_kernel.cc b/paddle/phi/kernels/cpu/abs_grad_kernel.cc
index 3c90a348d86a4ccdc1f6a5c1cd53815e00e1fa79..ca42a5eb2976f62708544e3d3bdd31f63d2a004f 100644
--- a/paddle/phi/kernels/cpu/abs_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/abs_grad_kernel.cc
@@ -19,7 +19,7 @@
 
 using phi::dtype::complex;
 
-PT_REGISTER_KERNEL(abs_grad,
+PD_REGISTER_KERNEL(abs_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::AbsGradKernel,
@@ -29,7 +29,7 @@ PT_REGISTER_KERNEL(abs_grad,
                    int64_t,
                    complex,
                    complex) {}
-PT_REGISTER_KERNEL(abs_double_grad,
+PD_REGISTER_KERNEL(abs_double_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::AbsDoubleGradKernel,
diff --git a/paddle/phi/kernels/cpu/abs_kernel.cc b/paddle/phi/kernels/cpu/abs_kernel.cc
index 97bd89832870cc1d2a9031c266441bfa4c732ef2..71d818c45e6f3f28697d3496cc9ae8a0d209ce6e 100644
--- a/paddle/phi/kernels/cpu/abs_kernel.cc
+++ b/paddle/phi/kernels/cpu/abs_kernel.cc
@@ -36,7 +36,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(abs,
+PD_REGISTER_KERNEL(abs,
                    CPU,
                    ALL_LAYOUT,
                    phi::AbsKernel,
diff --git a/paddle/phi/kernels/cpu/bernoulli_kernel.cc b/paddle/phi/kernels/cpu/bernoulli_kernel.cc
index 4ba965a4e5f1d2beb6a114b64ca5fa211804bbcb..09c07d9ec9dea028bd3b1921056b78bc97c07ec2 100644
--- a/paddle/phi/kernels/cpu/bernoulli_kernel.cc
+++ b/paddle/phi/kernels/cpu/bernoulli_kernel.cc
@@ -51,5 +51,5 @@ void BernoulliKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     bernoulli, CPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/cast_kernel.cc b/paddle/phi/kernels/cpu/cast_kernel.cc
index 4e95a37270dd43a4f3f45eb3a26b1c0500e0aaf2..c2c207bfaf25e5bea9faed36c85a5755884e5669 100644
--- a/paddle/phi/kernels/cpu/cast_kernel.cc
+++ b/paddle/phi/kernels/cpu/cast_kernel.cc
@@ -58,7 +58,7 @@ void CastKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(cast,
+PD_REGISTER_KERNEL(cast,
                    CPU,
                    ALL_LAYOUT,
                    phi::CastKernel,
diff --git a/paddle/phi/kernels/cpu/complex_kernel.cc b/paddle/phi/kernels/cpu/complex_kernel.cc
index 3a886c3378524c62c53aae9951de4db17aad9acc..ae09f2a5effe16c39d040cc2edd9400cb8ac96a7 100644
--- a/paddle/phi/kernels/cpu/complex_kernel.cc
+++ b/paddle/phi/kernels/cpu/complex_kernel.cc
@@ -21,7 +21,7 @@
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/complex.h"
 
-PT_REGISTER_KERNEL(conj,
+PD_REGISTER_KERNEL(conj,
                    CPU,
                    ALL_LAYOUT,
                    phi::ConjKernel,
diff --git a/paddle/phi/kernels/cpu/concat_kernel.cc b/paddle/phi/kernels/cpu/concat_kernel.cc
index 7f4cce379e04d4744f2544788feec28ba0a915e2..0cae2599f8d13fe807baa71be2692c85201fc5a8 100644
--- a/paddle/phi/kernels/cpu/concat_kernel.cc
+++ b/paddle/phi/kernels/cpu/concat_kernel.cc
@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(concat,
+PD_REGISTER_KERNEL(concat,
                    CPU,
                    ALL_LAYOUT,
                    phi::ConcatKernel,
diff --git a/paddle/phi/kernels/cpu/copy_kernel.cc b/paddle/phi/kernels/cpu/copy_kernel.cc
index 8a79a5f6b1941e1fcd24d5a1f05d1094628ca28d..7dcd75d39e4df5b7bc634c4e16f7843bf5044c94 100644
--- a/paddle/phi/kernels/cpu/copy_kernel.cc
+++ b/paddle/phi/kernels/cpu/copy_kernel.cc
@@ -56,5 +56,5 @@ void Copy(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     copy, CPU, ALL_LAYOUT, phi::Copy, ALL_DTYPE) {}
diff --git a/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc b/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
index 351b2335386a8b60c725c43d80bff8fc5872eb16..c3c290b4fe91ec1ecee6f0026ed5af39288e2618 100644
--- a/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/diagonal_grad_kernel.cc
@@ -82,7 +82,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
     }
   }
 }
 } // namespace phi
-PT_REGISTER_KERNEL(diagonal_grad,
+PD_REGISTER_KERNEL(diagonal_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::DiagonalGradKernel,
diff --git a/paddle/phi/kernels/cpu/diagonal_kernel.cc b/paddle/phi/kernels/cpu/diagonal_kernel.cc
index 79f09008f3e2e48cce5ec4f431b6541450c3d710..df17b458e1166b49815d405a4e7d97c5384ab4f0 100644
--- a/paddle/phi/kernels/cpu/diagonal_kernel.cc
+++ b/paddle/phi/kernels/cpu/diagonal_kernel.cc
@@ -79,7 +79,7 @@ void DiagonalKernel(const Context& dev_ctx,
     }
   }
 }
 } // namespace phi
-PT_REGISTER_KERNEL(diagonal,
+PD_REGISTER_KERNEL(diagonal,
                    CPU,
                    ALL_LAYOUT,
                    phi::DiagonalKernel,
diff --git a/paddle/phi/kernels/cpu/digamma_grad_kernel.cc b/paddle/phi/kernels/cpu/digamma_grad_kernel.cc
index 5cb86eef498bd325c8beda7c08f5e76b57f417b0..da1b5ae556609c05a91623cf9cac408e190868b9 100644
--- a/paddle/phi/kernels/cpu/digamma_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/digamma_grad_kernel.cc
@@ -19,5 +19,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma_grad, CPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/digamma_kernel.cc b/paddle/phi/kernels/cpu/digamma_kernel.cc
index 0013d8ee7740b8a396ebf127698b6be0b53067d0..ee120a29b6061efcadfb88ecce8ba3235d865ca1 100644
--- a/paddle/phi/kernels/cpu/digamma_kernel.cc
+++ b/paddle/phi/kernels/cpu/digamma_kernel.cc
@@ -19,5 +19,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma, CPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/dot_grad_kernel.cc b/paddle/phi/kernels/cpu/dot_grad_kernel.cc
index 729bc9aa3a3acad547269613cbfb66e75ff20ead..a2abdb7c00900ecd103562430d1f965cbaf92d4e 100644
--- a/paddle/phi/kernels/cpu/dot_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/dot_grad_kernel.cc
@@ -20,7 +20,7 @@
 
 #include "paddle/phi/common/complex.h"
 
-PT_REGISTER_KERNEL(dot_grad,
+PD_REGISTER_KERNEL(dot_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::DotGradKernel,
diff --git a/paddle/phi/kernels/cpu/dot_kernel.cc b/paddle/phi/kernels/cpu/dot_kernel.cc
index f4f5d1ffeb544dfa006444ce746e076c1d6258ae..3518501a6b63d160d32ecefc57236d4e2aa7b1fa 100644
--- a/paddle/phi/kernels/cpu/dot_kernel.cc
+++ b/paddle/phi/kernels/cpu/dot_kernel.cc
@@ -49,7 +49,7 @@ void DotKernel(const Context& dev_ctx,
 using complex64 = ::phi::dtype::complex;
 using complex128 = ::phi::dtype::complex;
 
-PT_REGISTER_KERNEL(dot,
+PD_REGISTER_KERNEL(dot,
                    CPU,
                    ALL_LAYOUT,
                    phi::DotKernel,
diff --git a/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc b/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
index 2d1b2a3bd7c3fa4d40d6544a704ef984d7fac1fc..0b29091367c83acee19e703f450d16602f322f3c 100644
--- a/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -125,7 +125,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(add_grad,
+PD_REGISTER_KERNEL(add_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::AddGradKernel,
@@ -137,7 +137,7 @@ PT_REGISTER_KERNEL(add_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(add_double_grad,
+PD_REGISTER_KERNEL(add_double_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::AddDoubleGradKernel,
@@ -149,7 +149,7 @@ PT_REGISTER_KERNEL(add_double_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(add_triple_grad,
+PD_REGISTER_KERNEL(add_triple_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::AddTripleGradKernel,
@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(subtract_grad,
+PD_REGISTER_KERNEL(subtract_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::SubtractGradKernel,
@@ -173,7 +173,7 @@ PT_REGISTER_KERNEL(subtract_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(subtract_double_grad,
+PD_REGISTER_KERNEL(subtract_double_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::SubtractDoubleGradKernel,
diff --git a/paddle/phi/kernels/cpu/expand_grad_kernel.cc b/paddle/phi/kernels/cpu/expand_grad_kernel.cc
index 427b6441b2d24c8ea1862cb7ae0168a3009c54dc..4799a6aa7afdf85a759d5940edea05e885b965e3 100644
--- a/paddle/phi/kernels/cpu/expand_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/expand_grad_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(expand_grad,
+PD_REGISTER_KERNEL(expand_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::ExpandGradKernel,
diff --git a/paddle/phi/kernels/cpu/expand_kernel.cc b/paddle/phi/kernels/cpu/expand_kernel.cc
index cce367c8eb832469a223c4c54d462b6f7c9b4237..077048976729fddefe8162f8eebb4961843dd2e0 100644
--- a/paddle/phi/kernels/cpu/expand_kernel.cc
+++ b/paddle/phi/kernels/cpu/expand_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/expand_kernel_impl.h"
 
-PT_REGISTER_KERNEL(expand,
+PD_REGISTER_KERNEL(expand,
                    CPU,
                    ALL_LAYOUT,
                    phi::ExpandKernel,
diff --git a/paddle/phi/kernels/cpu/full_kernel.cc b/paddle/phi/kernels/cpu/full_kernel.cc
index b55eb109f7de32ced5c8a316edd6aa2811b7e77d..84d7f56d3361c6e5d4382e8ea45e9b31a8eb60bd 100644
--- a/paddle/phi/kernels/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/cpu/full_kernel.cc
@@ -73,7 +73,7 @@ void FullLikeKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(full,
+PD_REGISTER_KERNEL(full,
                    CPU,
                    ALL_LAYOUT,
                    phi::FullKernel,
@@ -89,7 +89,7 @@ PT_REGISTER_KERNEL(full,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(full_like,
+PD_REGISTER_KERNEL(full_like,
                    CPU,
                    ALL_LAYOUT,
                    phi::FullLikeKernel,
diff --git a/paddle/phi/kernels/cpu/histogram_kernel.cc b/paddle/phi/kernels/cpu/histogram_kernel.cc
index fbcf47c3070e68470a2eecf3b4c6eaa6c37926d2..82b88f868d8a70cd61073b65bb24fd195baeb5c2 100644
--- a/paddle/phi/kernels/cpu/histogram_kernel.cc
+++ b/paddle/phi/kernels/cpu/histogram_kernel.cc
@@ -77,7 +77,7 @@ void HistogramKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(histogram,
+PD_REGISTER_KERNEL(histogram,
                    CPU,
                    ALL_LAYOUT,
                    phi::HistogramKernel,
diff --git a/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc
index bd2349393e742911156e4c219d557f10acb42ded..654f2c9400af00484e6921aae63aeb0d93b521ae 100644
--- a/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc
@@ -17,6 +17,6 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss_grad, CPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
 }
diff --git a/paddle/phi/kernels/cpu/huber_loss_kernel.cc b/paddle/phi/kernels/cpu/huber_loss_kernel.cc
index dfdab16bc85e33bbf1a10594784b5bddaad3f8d2..702c0589057af7079e6e0a41f1058063922790fe 100644
--- a/paddle/phi/kernels/cpu/huber_loss_kernel.cc
+++ b/paddle/phi/kernels/cpu/huber_loss_kernel.cc
@@ -17,5 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss, CPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/lerp_grad_kernel.cc b/paddle/phi/kernels/cpu/lerp_grad_kernel.cc
index 7cfb42dbcf96faef7a2b4a4d9f95b8d3a1cb28d6..d74919011ec5da08b700b974393fcc70de22b21c 100644
--- a/paddle/phi/kernels/cpu/lerp_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/lerp_grad_kernel.cc
@@ -17,5 +17,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     lerp_grad, CPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/lerp_kernel.cc b/paddle/phi/kernels/cpu/lerp_kernel.cc
index 97083c96464c305c1ccdb0ff674ce5aac372a335..7adfc35bfa321e8c111a11998e3b0b683009e619 100644
--- a/paddle/phi/kernels/cpu/lerp_kernel.cc
+++ b/paddle/phi/kernels/cpu/lerp_kernel.cc
@@ -17,4 +17,4 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
 
-PT_REGISTER_KERNEL(lerp, CPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
+PD_REGISTER_KERNEL(lerp, CPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc b/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
index 071bbba1975e40abe65cce3b50972cb282e45c95..7fe41e686af8c54d1d105ffe5ff43c5e9c7a92e8 100644
--- a/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -43,7 +43,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(masked_select_grad,
+PD_REGISTER_KERNEL(masked_select_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectGradKernel,
diff --git a/paddle/phi/kernels/cpu/masked_select_kernel.cc b/paddle/phi/kernels/cpu/masked_select_kernel.cc
index 08fc3f69f01e17c7e18b0f1307781d9d5290e801..274863a863b799a397840ceec314219fbbf70a39 100644
--- a/paddle/phi/kernels/cpu/masked_select_kernel.cc
+++ b/paddle/phi/kernels/cpu/masked_select_kernel.cc
@@ -61,7 +61,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(masked_select,
+PD_REGISTER_KERNEL(masked_select,
                    CPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectKernel,
diff --git a/paddle/phi/kernels/cpu/math_kernel.cc b/paddle/phi/kernels/cpu/math_kernel.cc
index 862ee42296c9244a37a018023d5f3d215b8204e0..581c5f90f35e5cadb239291d143ce54d499c017e 100644
--- a/paddle/phi/kernels/cpu/math_kernel.cc
+++ b/paddle/phi/kernels/cpu/math_kernel.cc
@@ -118,7 +118,7 @@ using complex128 = ::phi::dtype::complex;
 // NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
 // using bfloat16 = ::phi::dtype::bfloat16;
 
-PT_REGISTER_KERNEL(add_raw,
+PD_REGISTER_KERNEL(add_raw,
                    CPU,
                    ALL_LAYOUT,
                    phi::AddRawKernel,
@@ -129,7 +129,7 @@ PT_REGISTER_KERNEL(add_raw,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(subtract_raw,
+PD_REGISTER_KERNEL(subtract_raw,
                    CPU,
                    ALL_LAYOUT,
                    phi::SubtractRawKernel,
@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL(subtract_raw,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(divide_raw,
+PD_REGISTER_KERNEL(divide_raw,
                    CPU,
                    ALL_LAYOUT,
                    phi::DivideRawKernel,
@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL(divide_raw,
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(multiply_raw,
+PD_REGISTER_KERNEL(multiply_raw,
                    CPU,
                    ALL_LAYOUT,
                    phi::MultiplyRawKernel,
@@ -161,7 +161,7 @@ PT_REGISTER_KERNEL(multiply_raw,
                    bool,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(sum_raw,
+PD_REGISTER_KERNEL(sum_raw,
                    CPU,
                    ALL_LAYOUT,
                    phi::SumRawKernel,
@@ -176,5 +176,5 @@ PT_REGISTER_KERNEL(sum_raw,
                    complex128) {
  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     mean_raw, CPU, ALL_LAYOUT, phi::MeanRawKernel, float, double, bool) {}
diff --git a/paddle/phi/kernels/cpu/matmul_grad_kernel.cc b/paddle/phi/kernels/cpu/matmul_grad_kernel.cc
index 56a185e4ade064f91b1e7a52ff48997c7e9941e1..c68e8115e898b3701b9f568ac501260615b69ad4 100644
--- a/paddle/phi/kernels/cpu/matmul_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/matmul_grad_kernel.cc
@@ -19,7 +19,7 @@ limitations under the License. */
 
 #include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(matmul_grad,
+PD_REGISTER_KERNEL(matmul_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::MatmulGradKernel,
@@ -28,7 +28,7 @@ PT_REGISTER_KERNEL(matmul_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(matmul_double_grad,
+PD_REGISTER_KERNEL(matmul_double_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::MatmulDoubleGradKernel,
@@ -37,7 +37,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(matmul_triple_grad,
+PD_REGISTER_KERNEL(matmul_triple_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::MatmulTripleGradKernel,
diff --git a/paddle/phi/kernels/cpu/matmul_kernel.cc b/paddle/phi/kernels/cpu/matmul_kernel.cc
index 8676aec3eccb475a9de346e34e15c01c195aebbb..2bf56c07a5bc7485fd29d6ac347a5311915d8f36 100644
--- a/paddle/phi/kernels/cpu/matmul_kernel.cc
+++ b/paddle/phi/kernels/cpu/matmul_kernel.cc
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
 
-PT_REGISTER_KERNEL(matmul,
+PD_REGISTER_KERNEL(matmul,
                    CPU,
                    ALL_LAYOUT,
                    phi::MatmulKernel,
diff --git a/paddle/phi/kernels/cpu/norm_grad_kernel.cc b/paddle/phi/kernels/cpu/norm_grad_kernel.cc
index d2073c07244bd54acbfcf7bf81028684f3ea739b..597207a05a226ac598d9141b42d5682bed5364f1 100644
--- a/paddle/phi/kernels/cpu/norm_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/norm_grad_kernel.cc
@@ -83,5 +83,5 @@ void NormGradKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     norm_grad, CPU, ALL_LAYOUT, phi::NormGradKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/norm_kernel.cc b/paddle/phi/kernels/cpu/norm_kernel.cc
index e8f35b5fe7efd8dc04f16dffa877af082456a14d..50906d9c3bb9495817e81678b60fe3e426a22444 100644
--- a/paddle/phi/kernels/cpu/norm_kernel.cc
+++ b/paddle/phi/kernels/cpu/norm_kernel.cc
@@ -76,4 +76,4 @@ void NormKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(norm, CPU, ALL_LAYOUT, phi::NormKernel, float, double) {}
+PD_REGISTER_KERNEL(norm, CPU, ALL_LAYOUT, phi::NormKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/scale_kernel.cc b/paddle/phi/kernels/cpu/scale_kernel.cc
index 156afb8798de40000dcdea7d613734b92f1bc162..e929b5bd7219b60acb226374f67a0bc511c41723 100644
--- a/paddle/phi/kernels/cpu/scale_kernel.cc
+++ b/paddle/phi/kernels/cpu/scale_kernel.cc
@@ -51,7 +51,7 @@ void ScaleKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(scale,
+PD_REGISTER_KERNEL(scale,
                    CPU,
                    ALL_LAYOUT,
                    phi::ScaleKernel,
diff --git a/paddle/phi/kernels/cpu/sign_kernel.cc b/paddle/phi/kernels/cpu/sign_kernel.cc
index 6be931904d133159b907d296d17aebdba9bc2501..5fe11ffbd6d5c08b5072b61ab23d6fbea1879b53 100644
--- a/paddle/phi/kernels/cpu/sign_kernel.cc
+++ b/paddle/phi/kernels/cpu/sign_kernel.cc
@@ -21,4 +21,4 @@ limitations under the License. */
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/bfloat16.h"
 
-PT_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}
+PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}
diff --git a/paddle/phi/kernels/cpu/split_kernel.cc b/paddle/phi/kernels/cpu/split_kernel.cc
index d02909f007da462089903d0f0764e2cf86231ede..259bf9e388c2c1a88400d13086bf9df23df21044 100644
--- a/paddle/phi/kernels/cpu/split_kernel.cc
+++ b/paddle/phi/kernels/cpu/split_kernel.cc
@@ -60,7 +60,7 @@ void SplitKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(split,
+PD_REGISTER_KERNEL(split,
                    CPU,
                    ALL_LAYOUT,
                    phi::SplitKernel,
diff --git a/paddle/phi/kernels/cpu/trace_grad_kernel.cc b/paddle/phi/kernels/cpu/trace_grad_kernel.cc
index e6ffd99bc53bd837aa3ef5ea142890fd4786249d..2167851b197d142a3e9c4b104175fd9147de6972 100644
--- a/paddle/phi/kernels/cpu/trace_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/trace_grad_kernel.cc
@@ -18,7 +18,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(trace_grad,
+PD_REGISTER_KERNEL(trace_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::TraceGradKernel,
diff --git a/paddle/phi/kernels/cpu/trace_kernel.cc b/paddle/phi/kernels/cpu/trace_kernel.cc
index 2b2cda6491d48487834321b376920f8943ea3650..3646e226519139430818c0f17b3f40c61c516dbd 100644
--- a/paddle/phi/kernels/cpu/trace_kernel.cc
+++ b/paddle/phi/kernels/cpu/trace_kernel.cc
@@ -45,7 +45,7 @@ void TraceKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(trace,
+PD_REGISTER_KERNEL(trace,
                    CPU,
                    ALL_LAYOUT,
                    phi::TraceKernel,
diff --git a/paddle/phi/kernels/cpu/trunc_grad_kernel.cc b/paddle/phi/kernels/cpu/trunc_grad_kernel.cc
index 7fc677c16ef7397e0963bbd1c9eed3ac49f136e0..4d85dd609e2d1f14cc476a1c53ba0506e6b519a5 100644
--- a/paddle/phi/kernels/cpu/trunc_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/trunc_grad_kernel.cc
@@ -30,7 +30,7 @@ void TruncGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(trunc_grad,
+PD_REGISTER_KERNEL(trunc_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::TruncGradKernel,
diff --git a/paddle/phi/kernels/cpu/trunc_kernel.cc b/paddle/phi/kernels/cpu/trunc_kernel.cc
index 10e42196679fa546f7611b97fbcda812bedf4b23..babae6ce7c9318f7cb4ba1f15aedbe38de5ebbd3 100644
--- a/paddle/phi/kernels/cpu/trunc_kernel.cc
+++ b/paddle/phi/kernels/cpu/trunc_kernel.cc
@@ -35,5 +35,5 @@ void TruncKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     trunc, CPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
diff --git a/paddle/phi/kernels/empty_kernel.cc b/paddle/phi/kernels/empty_kernel.cc
index 6d9e733b2f57677c70e259f39d20c332a5fff195..8109d3879cb21edd85d19612a62d9a8e0711e456 100644
--- a/paddle/phi/kernels/empty_kernel.cc
+++ b/paddle/phi/kernels/empty_kernel.cc
@@ -38,7 +38,7 @@ void EmptyLikeKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(empty,
+PD_REGISTER_KERNEL(empty,
                    CPU,
                    ALL_LAYOUT,
                    phi::EmptyKernel,
@@ -54,7 +54,7 @@ PT_REGISTER_KERNEL(empty,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(empty_like,
+PD_REGISTER_KERNEL(empty_like,
                    CPU,
                    ALL_LAYOUT,
                    phi::EmptyLikeKernel,
@@ -71,7 +71,7 @@ PT_REGISTER_KERNEL(empty_like,
                    phi::dtype::complex) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(empty,
+PD_REGISTER_KERNEL(empty,
                    GPU,
                    ALL_LAYOUT,
                    phi::EmptyKernel,
@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(empty,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(empty_like,
+PD_REGISTER_KERNEL(empty_like,
                    GPU,
                    ALL_LAYOUT,
                    phi::EmptyLikeKernel,
diff --git a/paddle/phi/kernels/flatten_grad_kernel.cc b/paddle/phi/kernels/flatten_grad_kernel.cc
index 33e6c2724982a7c916636d2f782898eedf875225..7e8010a43f3d1898309ff72ab7189c58d4ece71d 100644
--- a/paddle/phi/kernels/flatten_grad_kernel.cc
+++ b/paddle/phi/kernels/flatten_grad_kernel.cc
@@ -32,7 +32,7 @@ void FlattenGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::FlattenGradKernel,
@@ -44,7 +44,7 @@ PT_REGISTER_KERNEL(flatten_grad,
                    int64_t) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::FlattenGradKernel,
@@ -59,7 +59,7 @@ PT_REGISTER_KERNEL(flatten_grad,
 #endif
 
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_KERNEL(flatten_grad,
+PD_REGISTER_KERNEL(flatten_grad,
                    XPU,
                    ALL_LAYOUT,
                    phi::FlattenGradKernel,
diff --git a/paddle/phi/kernels/flatten_kernel.cc b/paddle/phi/kernels/flatten_kernel.cc
index 1ac444aa1792f4645c44feb117a5eacc409b0017..12eaab92d5211c08143ba72058cd4443aca1501c 100644
--- a/paddle/phi/kernels/flatten_kernel.cc
+++ b/paddle/phi/kernels/flatten_kernel.cc
@@ -48,7 +48,7 @@ void FlattenWithXShape(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    CPU,
                    ALL_LAYOUT,
                    phi::FlattenKernel,
@@ -60,7 +60,7 @@ PT_REGISTER_KERNEL(flatten,
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    CPU,
                    ALL_LAYOUT,
                    phi::FlattenWithXShape,
@@ -73,7 +73,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
                    int64_t) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    GPU,
                    ALL_LAYOUT,
                    phi::FlattenKernel,
@@ -86,7 +86,7 @@ PT_REGISTER_KERNEL(flatten,
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    GPU,
                    ALL_LAYOUT,
                    phi::FlattenWithXShape,
@@ -101,7 +101,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
 #endif
 
 #ifdef PADDLE_WITH_XPU
-PT_REGISTER_KERNEL(flatten,
+PD_REGISTER_KERNEL(flatten,
                    XPU,
                    ALL_LAYOUT,
                    phi::FlattenKernel,
@@ -112,7 +112,7 @@ PT_REGISTER_KERNEL(flatten,
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL(flatten_with_xshape,
+PD_REGISTER_KERNEL(flatten_with_xshape,
                    XPU,
                    ALL_LAYOUT,
                    phi::FlattenWithXShape,
diff --git a/paddle/phi/kernels/gpu/abs_grad_kernel.cu b/paddle/phi/kernels/gpu/abs_grad_kernel.cu
index 37b19278233a8728ab444ea3dd97cd623742f730..1ce6a1638b1a04fa8e21adb386b1c92bd57296a2 100644
--- a/paddle/phi/kernels/gpu/abs_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/abs_grad_kernel.cu
@@ -20,7 +20,7 @@
 
 using phi::dtype::complex;
 
-PT_REGISTER_KERNEL(abs_grad,
+PD_REGISTER_KERNEL(abs_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::AbsGradKernel,
@@ -31,7 +31,7 @@ PT_REGISTER_KERNEL(abs_grad,
                    phi::dtype::float16,
                    complex,
                    complex) {}
-PT_REGISTER_KERNEL(abs_double_grad,
+PD_REGISTER_KERNEL(abs_double_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::AbsDoubleGradKernel,
diff --git a/paddle/phi/kernels/gpu/abs_kernel.cu b/paddle/phi/kernels/gpu/abs_kernel.cu
index 5c191dfc992a526b5418892e612243fac6bf766f..e122e6b1e9c8abe977ec5688a2ffddecadc776fb 100644
--- a/paddle/phi/kernels/gpu/abs_kernel.cu
+++ b/paddle/phi/kernels/gpu/abs_kernel.cu
@@ -52,7 +52,7 @@ void AbsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(abs,
+PD_REGISTER_KERNEL(abs,
                    GPU,
                    ALL_LAYOUT,
                    phi::AbsKernel,
diff --git a/paddle/phi/kernels/gpu/bernoulli_kernel.cu b/paddle/phi/kernels/gpu/bernoulli_kernel.cu
index b043a55e21b611b254ec46360d11342bce851c57..6127bceef509c95b5e11204a9c578c6350802831 100644
--- a/paddle/phi/kernels/gpu/bernoulli_kernel.cu
+++ b/paddle/phi/kernels/gpu/bernoulli_kernel.cu
@@ -73,5 +73,5 @@ void BernoulliKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     bernoulli, GPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/cast_kernel.cu b/paddle/phi/kernels/gpu/cast_kernel.cu
index c05cd15b4757a3064f3e1eeec7ee724439115276..7a6c99c5fe15f6ddecd190d2d77e359503be7a80 100644
--- a/paddle/phi/kernels/gpu/cast_kernel.cu
+++ b/paddle/phi/kernels/gpu/cast_kernel.cu
@@ -61,7 +61,7 @@ void CastKernel(const Context& dev_ctx,
 } // namespace phi
 
 #define PTEN_REGISTER_CAST_CUDA_BASE_TYPE(op_name, ...) \
-  PT_REGISTER_KERNEL(cast, \
+  PD_REGISTER_KERNEL(cast, \
                      GPU, \
                      ALL_LAYOUT, \
                      phi::CastKernel, \
diff --git a/paddle/phi/kernels/gpu/complex_kernel.cu b/paddle/phi/kernels/gpu/complex_kernel.cu
index 47a43ee9910b8579529128115652f3321ef3496a..02fd408aba86f3fd7d764e3a6daf89a5774ff41f 100644
--- a/paddle/phi/kernels/gpu/complex_kernel.cu
+++ b/paddle/phi/kernels/gpu/complex_kernel.cu
@@ -21,7 +21,7 @@
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/complex.h"
 
-PT_REGISTER_KERNEL(conj,
+PD_REGISTER_KERNEL(conj,
                    GPU,
                    ALL_LAYOUT,
                    phi::ConjKernel,
diff --git a/paddle/phi/kernels/gpu/concat_kernel.cu b/paddle/phi/kernels/gpu/concat_kernel.cu
index 22faeaf41970083d970903b64808b638f2115931..c80a873127708c244c88eaf83516662f34b40993 100644
--- a/paddle/phi/kernels/gpu/concat_kernel.cu
+++ b/paddle/phi/kernels/gpu/concat_kernel.cu
@@ -110,7 +110,7 @@ void ConcatKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(concat,
+PD_REGISTER_KERNEL(concat,
                    GPU,
                    ALL_LAYOUT,
                    phi::ConcatKernel,
diff --git a/paddle/phi/kernels/gpu/copy_kernel.cu b/paddle/phi/kernels/gpu/copy_kernel.cu
index 58b0a31d1d6d54e291339e59d4ab0da7ef09e68b..e88795b6173706a8b54cd23c64f73b11e08f0fa6 100644
--- a/paddle/phi/kernels/gpu/copy_kernel.cu
+++ b/paddle/phi/kernels/gpu/copy_kernel.cu
@@ -207,5 +207,5 @@ void Copy(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_GENERAL_KERNEL(
+PD_REGISTER_GENERAL_KERNEL(
     copy, GPU, ALL_LAYOUT, phi::Copy, ALL_DTYPE) {}
diff --git a/paddle/phi/kernels/gpu/diagonal_grad_kernel.cu b/paddle/phi/kernels/gpu/diagonal_grad_kernel.cu
index 599fa2842a974e737c3d095b48f6e49f13578218..423093728e9d62386832d38a8db7caa5984e07d3 100644
--- a/paddle/phi/kernels/gpu/diagonal_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/diagonal_grad_kernel.cu
@@ -158,7 +158,7 @@ void DiagonalGradKernel(const Context& dev_ctx,
   }
 }
 } // namespace phi
-PT_REGISTER_KERNEL(diagonal_grad,
+PD_REGISTER_KERNEL(diagonal_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::DiagonalGradKernel,
diff --git a/paddle/phi/kernels/gpu/diagonal_kernel.cu b/paddle/phi/kernels/gpu/diagonal_kernel.cu
index c4b61cf819f84464d5f2b3e19a9a9c25b1908207..58da29b2224a615234634f4e853089f5f51e2dcd 100644
--- a/paddle/phi/kernels/gpu/diagonal_kernel.cu
+++ b/paddle/phi/kernels/gpu/diagonal_kernel.cu
@@ -154,7 +154,7 @@ void DiagonalKernel(const Context& dev_ctx,
   }
 }
 } // namespace phi
-PT_REGISTER_KERNEL(diagonal,
+PD_REGISTER_KERNEL(diagonal,
                    GPU,
                    ALL_LAYOUT,
                    phi::DiagonalKernel,
diff --git a/paddle/phi/kernels/gpu/digamma_grad_kernel.cu b/paddle/phi/kernels/gpu/digamma_grad_kernel.cu
index 54a618fe0421e4b6ecdda8d4ee3f4174ab5aeb5b..695227bba0f71d8c40e730a30998235c7c756442 100644
--- a/paddle/phi/kernels/gpu/digamma_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/digamma_grad_kernel.cu
@@ -18,5 +18,5 @@
 #include "paddle/phi/kernels/digamma_grad_kernel.h"
 #include "paddle/phi/kernels/impl/digamma_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma_grad, GPU, ALL_LAYOUT, phi::DigammaGradKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/digamma_kernel.cu b/paddle/phi/kernels/gpu/digamma_kernel.cu
index 91d63eeab8c83e72d965045b00e3df7005a27469..381c22a82e863d08d92b9fd5a9824fb9678ef2fa 100644
--- a/paddle/phi/kernels/gpu/digamma_kernel.cu
+++ b/paddle/phi/kernels/gpu/digamma_kernel.cu
@@ -19,5 +19,5 @@
 #include "paddle/phi/kernels/digamma_kernel.h"
 #include "paddle/phi/kernels/impl/digamma_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     digamma, GPU, ALL_LAYOUT, phi::DigammaKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/dot_grad_kernel.cu b/paddle/phi/kernels/gpu/dot_grad_kernel.cu
index 3290dba3d45b9789722f8be4859d1c28a8ba66c7..7defc0304e511e2002c9b904226792636212a214 100644
--- a/paddle/phi/kernels/gpu/dot_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/dot_grad_kernel.cu
@@ -20,7 +20,7 @@ limitations under the License. */
 
 #include "paddle/phi/common/complex.h"
 
-PT_REGISTER_KERNEL(dot_grad,
+PD_REGISTER_KERNEL(dot_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::DotGradKernel,
diff --git a/paddle/phi/kernels/gpu/dot_kernel.cu b/paddle/phi/kernels/gpu/dot_kernel.cu
index 9f3c3ff794abaef4464505566cda38f83c2e79bc..4442396f6c9dd752d044a1f540b673546944586c 100644
--- a/paddle/phi/kernels/gpu/dot_kernel.cu
+++ b/paddle/phi/kernels/gpu/dot_kernel.cu
@@ -52,7 +52,7 @@ void DotKernel(const Context& dev_ctx,
 using complex64 = ::phi::dtype::complex;
 using complex128 = ::phi::dtype::complex;
 
-PT_REGISTER_KERNEL(dot,
+PD_REGISTER_KERNEL(dot,
                    GPU,
                    ALL_LAYOUT,
                    phi::DotKernel,
diff --git a/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu b/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
index fc78fe88c2e0e1201542765aed8684ddff9c7697..02dbb506c4eb579fbb2b82513421aaf1dd3ef163 100644
--- a/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
@@ -119,7 +119,7 @@ void SubtractDoubleGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(add_grad,
+PD_REGISTER_KERNEL(add_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddGradKernel,
@@ -131,7 +131,7 @@ PT_REGISTER_KERNEL(add_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(add_double_grad,
+PD_REGISTER_KERNEL(add_double_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddDoubleGradKernel,
@@ -143,7 +143,7 @@ PT_REGISTER_KERNEL(add_double_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(add_triple_grad,
+PD_REGISTER_KERNEL(add_triple_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddTripleGradKernel,
@@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(subtract_grad,
+PD_REGISTER_KERNEL(subtract_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::SubtractGradKernel,
@@ -167,7 +167,7 @@ PT_REGISTER_KERNEL(subtract_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(subtract_double_grad,
+PD_REGISTER_KERNEL(subtract_double_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::SubtractDoubleGradKernel,
diff --git a/paddle/phi/kernels/gpu/expand_grad_kernel.cu b/paddle/phi/kernels/gpu/expand_grad_kernel.cu
index 9ee58ad6caf29c7ff150f60627c772e166cb36a8..8e2c3fde04a6a0038f457cfe74561719bba7f069 100644
--- a/paddle/phi/kernels/gpu/expand_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/expand_grad_kernel.cu
@@ -18,7 +18,7 @@
 #include "paddle/phi/kernels/expand_grad_kernel.h"
 #include "paddle/phi/kernels/impl/expand_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(expand_grad,
+PD_REGISTER_KERNEL(expand_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::ExpandGradKernel,
diff --git a/paddle/phi/kernels/gpu/expand_kernel.cu b/paddle/phi/kernels/gpu/expand_kernel.cu
index dc1b4717fcc4c80809546acfb4cc8510d3e67c82..d4275804b3db8f4ae246379fe562d57598a2100d 100644
--- a/paddle/phi/kernels/gpu/expand_kernel.cu
+++ b/paddle/phi/kernels/gpu/expand_kernel.cu
@@ -19,7 +19,7 @@
 #include "paddle/phi/kernels/expand_kernel.h"
 #include "paddle/phi/kernels/impl/expand_kernel_impl.h"
 
-PT_REGISTER_KERNEL(expand,
+PD_REGISTER_KERNEL(expand,
                    GPU,
                    ALL_LAYOUT,
                    phi::ExpandKernel,
diff --git a/paddle/phi/kernels/gpu/full_kernel.cu b/paddle/phi/kernels/gpu/full_kernel.cu
index caa05514c4f0fa2f6b0f78951dae49d7d4710b66..d5cb1575b71817be445311d1081a7185e80e49ed 100644
--- a/paddle/phi/kernels/gpu/full_kernel.cu
+++ b/paddle/phi/kernels/gpu/full_kernel.cu
@@ -98,7 +98,7 @@ void FullLikeKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(full,
+PD_REGISTER_KERNEL(full,
                    GPU,
                    ALL_LAYOUT,
                    phi::FullKernel,
@@ -113,7 +113,7 @@ PT_REGISTER_KERNEL(full,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(full_like,
+PD_REGISTER_KERNEL(full_like,
                    GPU,
                    ALL_LAYOUT,
                    phi::FullLikeKernel,
diff --git a/paddle/phi/kernels/gpu/histogram_kernel.cu b/paddle/phi/kernels/gpu/histogram_kernel.cu
index 47dee820e2fbde254b21d4203e41368214ff91d7..6db987e22fc6c2ed59f67ac82adee8176ede0c9b 100644
--- a/paddle/phi/kernels/gpu/histogram_kernel.cu
+++ b/paddle/phi/kernels/gpu/histogram_kernel.cu
@@ -149,7 +149,7 @@ void HistogramKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(histogram,
+PD_REGISTER_KERNEL(histogram,
                    GPU,
                    ALL_LAYOUT,
                    phi::HistogramKernel,
diff --git a/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu b/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu
index 5e1e000a38d955fc8f20e609ce57f11a0379a1ac..20cc2ed669adf91ee369b11ba957805839978581 100644
--- a/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu
@@ -17,6 +17,6 @@
 #include "paddle/phi/kernels/huber_loss_grad_kernel.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
 }
diff --git a/paddle/phi/kernels/gpu/huber_loss_kernel.cu b/paddle/phi/kernels/gpu/huber_loss_kernel.cu
index 2cca0c08a3f3bf42bb885805cf6b57ab49c9ed62..26648a260b99ec6835ed3679e7c2c0550e8f6063 100644
--- a/paddle/phi/kernels/gpu/huber_loss_kernel.cu
+++ b/paddle/phi/kernels/gpu/huber_loss_kernel.cu
@@ -17,5 +17,5 @@
 #include "paddle/phi/kernels/huber_loss_kernel.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/lerp_grad_kernel.cu b/paddle/phi/kernels/gpu/lerp_grad_kernel.cu
index 81bd69a5f12e041b613f822561dcd63690f6c828..0a5ac99fa8e458cc7d786f1fbb00d8106032719c 100644
--- a/paddle/phi/kernels/gpu/lerp_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/lerp_grad_kernel.cu
@@ -17,5 +17,5 @@
 #include "paddle/phi/kernels/impl/lerp_grad_kernel_impl.h"
 #include "paddle/phi/kernels/lerp_grad_kernel.h"
 
-PT_REGISTER_KERNEL(
+PD_REGISTER_KERNEL(
     lerp_grad, GPU, ALL_LAYOUT, phi::LerpGradKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/lerp_kernel.cu b/paddle/phi/kernels/gpu/lerp_kernel.cu
index 190248c0cd077a1228c3ab5d324f3c7bb3f59254..96010aff4e70c6399032307ce7c48be7822ce12a 100644
--- a/paddle/phi/kernels/gpu/lerp_kernel.cu
+++ b/paddle/phi/kernels/gpu/lerp_kernel.cu
@@ -17,4 +17,4 @@
 #include "paddle/phi/kernels/impl/lerp_kernel_impl.h"
 #include "paddle/phi/kernels/lerp_kernel.h"
 
-PT_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
+PD_REGISTER_KERNEL(lerp, GPU, ALL_LAYOUT, phi::LerpKernel, float, double) {}
diff --git a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
index c4f4b461f2aa042307ed4e34a4c450a1dfbfd644..71b7cd8750462fdf0dad20b2b221bd18cc6dbbe6 100644
--- a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -96,7 +96,7 @@ void MaskedSelectGradKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(masked_select_grad,
+PD_REGISTER_KERNEL(masked_select_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectGradKernel,
diff --git a/paddle/phi/kernels/gpu/masked_select_kernel.cu b/paddle/phi/kernels/gpu/masked_select_kernel.cu
index 8254ce4be6356eba89786467249bb2e3e59ff52e..fc4adca2f42438f464346ad83bc7e49448826bb2 100644
--- a/paddle/phi/kernels/gpu/masked_select_kernel.cu
+++ b/paddle/phi/kernels/gpu/masked_select_kernel.cu
@@ -108,7 +108,7 @@ void MaskedSelectKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(masked_select,
+PD_REGISTER_KERNEL(masked_select,
                    GPU,
                    ALL_LAYOUT,
                    phi::MaskedSelectKernel,
diff --git a/paddle/phi/kernels/gpu/math_kernel.cu b/paddle/phi/kernels/gpu/math_kernel.cu
index c3605ce655f2bde78c01a88db4dd70bd38c98431..f7b1205cb593a24d4799a64f7afe6f3559adf26b 100644
--- a/paddle/phi/kernels/gpu/math_kernel.cu
+++ b/paddle/phi/kernels/gpu/math_kernel.cu
@@ -95,7 +95,7 @@ using float16 = phi::dtype::float16;
 using complex64 = ::phi::dtype::complex;
 using complex128 = ::phi::dtype::complex;
 
-PT_REGISTER_KERNEL(add_raw,
+PD_REGISTER_KERNEL(add_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::AddRawKernel,
@@ -107,7 +107,7 @@ PT_REGISTER_KERNEL(add_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(subtract_raw,
+PD_REGISTER_KERNEL(subtract_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::SubtractRawKernel,
@@ -119,7 +119,7 @@ PT_REGISTER_KERNEL(subtract_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(divide_raw,
+PD_REGISTER_KERNEL(divide_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::DivideRawKernel,
@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL(divide_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(multiply_raw,
+PD_REGISTER_KERNEL(multiply_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::MultiplyRawKernel,
@@ -142,7 +142,7 @@ PT_REGISTER_KERNEL(multiply_raw,
                    float16,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL(sum_raw,
+PD_REGISTER_KERNEL(sum_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::SumRawKernel,
@@ -158,7 +158,7 @@ PT_REGISTER_KERNEL(sum_raw,
  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
 }
 
-PT_REGISTER_KERNEL(mean_raw,
+PD_REGISTER_KERNEL(mean_raw,
                    GPU,
                    ALL_LAYOUT,
                    phi::MeanRawKernel,
diff --git a/paddle/phi/kernels/gpu/matmul_grad_kernel.cu b/paddle/phi/kernels/gpu/matmul_grad_kernel.cu
index 7da5fb2c98818064af9d171256d5a93fd27c4668..ff23ebd05b52833eef9fd23efb1d8537d1013454 100644
--- a/paddle/phi/kernels/gpu/matmul_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/matmul_grad_kernel.cu
@@ -19,7 +19,7 @@ limitations under the License. */
 
 #include "paddle/phi/kernels/impl/matmul_grad_kernel_impl.h"
 
-PT_REGISTER_KERNEL(matmul_grad,
+PD_REGISTER_KERNEL(matmul_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulGradKernel,
@@ -30,7 +30,7 @@ PT_REGISTER_KERNEL(matmul_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(matmul_double_grad,
+PD_REGISTER_KERNEL(matmul_double_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulDoubleGradKernel,
@@ -40,7 +40,7 @@ PT_REGISTER_KERNEL(matmul_double_grad,
                    phi::dtype::complex,
                    phi::dtype::complex) {}
 
-PT_REGISTER_KERNEL(matmul_triple_grad,
+PD_REGISTER_KERNEL(matmul_triple_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulTripleGradKernel,
diff --git a/paddle/phi/kernels/gpu/matmul_kernel.cu b/paddle/phi/kernels/gpu/matmul_kernel.cu
index 3041784e93695fcd7e16460202f7aaa9a96029ac..98be79c5f9dab5f1a72d7784dfbe1745d27bd622 100644
--- a/paddle/phi/kernels/gpu/matmul_kernel.cu
+++ b/paddle/phi/kernels/gpu/matmul_kernel.cu
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/kernels/impl/matmul_kernel_impl.h"
 
-PT_REGISTER_KERNEL(matmul,
+PD_REGISTER_KERNEL(matmul,
                    GPU,
                    ALL_LAYOUT,
                    phi::MatmulKernel,
diff --git a/paddle/phi/kernels/gpu/norm_grad_kernel.cu b/paddle/phi/kernels/gpu/norm_grad_kernel.cu
index 3530de11d35e2a1e1200b0c3f748fc5cbf5bb540..ab38a82eceb1e73bddbe07a37d72cab99929852c 100644
--- a/paddle/phi/kernels/gpu/norm_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/norm_grad_kernel.cu
@@ -111,7 +111,7 @@ void NormGradKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(norm_grad,
+PD_REGISTER_KERNEL(norm_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::NormGradKernel,
diff --git a/paddle/phi/kernels/gpu/norm_kernel.cu b/paddle/phi/kernels/gpu/norm_kernel.cu
index 4ed3100918edf5a3cbed19cd99c32958421f41ab..274f91b8dd6611e5d560713ed9f2338bb95c73df 100644
--- a/paddle/phi/kernels/gpu/norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/norm_kernel.cu
@@ -124,7 +124,7 @@ void NormKernel(const Context& ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(norm,
+PD_REGISTER_KERNEL(norm,
                    GPU,
                    ALL_LAYOUT,
                    phi::NormKernel,
diff --git a/paddle/phi/kernels/gpu/scale_kernel.cu b/paddle/phi/kernels/gpu/scale_kernel.cu
index 245605ed8a91b9424d663ee70e204033a7794c83..d9c8de21c5bc2d26cb371d03be30ed0616a27a64 100644
--- a/paddle/phi/kernels/gpu/scale_kernel.cu
+++ b/paddle/phi/kernels/gpu/scale_kernel.cu
@@ -63,7 +63,7 @@ void ScaleKernel(const Context& dev_ctx,
 
 } // namespace phi
 
-PT_REGISTER_KERNEL(scale,
+PD_REGISTER_KERNEL(scale,
                    GPU,
                    ALL_LAYOUT,
                    phi::ScaleKernel,
diff --git a/paddle/phi/kernels/gpu/sign_kernel.cu.cc b/paddle/phi/kernels/gpu/sign_kernel.cu.cc
index 950cf67d7cff5b675ea10b7e62bba8729a1523d0..1fe17a7a227ecfbb05fadb583e5ed27456f318b6 100644
--- a/paddle/phi/kernels/gpu/sign_kernel.cu.cc
+++ b/paddle/phi/kernels/gpu/sign_kernel.cu.cc
@@ -23,5 +23,5 @@ limitations under the License.
*/ using float16 = phi::dtype::float16; -PT_REGISTER_KERNEL( +PD_REGISTER_KERNEL( sign, GPU, ALL_LAYOUT, phi::SignKernel, float, double, float16) {} diff --git a/paddle/phi/kernels/gpu/split_kernel.cu b/paddle/phi/kernels/gpu/split_kernel.cu index 919b0a7d4f9664c2df4c3f9e0c77200344911de6..5222fce03ace6fe30fce4aa9908794e348b79ad3 100644 --- a/paddle/phi/kernels/gpu/split_kernel.cu +++ b/paddle/phi/kernels/gpu/split_kernel.cu @@ -59,7 +59,7 @@ void SplitKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(split, +PD_REGISTER_KERNEL(split, GPU, ALL_LAYOUT, phi::SplitKernel, diff --git a/paddle/phi/kernels/gpu/trace_grad_kernel.cu b/paddle/phi/kernels/gpu/trace_grad_kernel.cu index a7e4b55b4ca221dbd6776aa8a9be76e84472c545..6692c1e19b033c3945387166f9954cc71fa6de32 100644 --- a/paddle/phi/kernels/gpu/trace_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/trace_grad_kernel.cu @@ -18,7 +18,7 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/trace_grad_kernel_impl.h" -PT_REGISTER_KERNEL(trace_grad, +PD_REGISTER_KERNEL(trace_grad, GPU, ALL_LAYOUT, phi::TraceGradKernel, diff --git a/paddle/phi/kernels/gpu/trace_kernel.cu b/paddle/phi/kernels/gpu/trace_kernel.cu index bc8b6bc922c9176c40da06b7f80dd5556164f905..7ac7c451b00542c3e0511692dc7cad470374f2ae 100644 --- a/paddle/phi/kernels/gpu/trace_kernel.cu +++ b/paddle/phi/kernels/gpu/trace_kernel.cu @@ -44,7 +44,7 @@ void TraceKernel(const Context& ctx, } // namespace phi -PT_REGISTER_KERNEL(trace, +PD_REGISTER_KERNEL(trace, GPU, ALL_LAYOUT, phi::TraceKernel, diff --git a/paddle/phi/kernels/gpu/trunc_grad_kernel.cu b/paddle/phi/kernels/gpu/trunc_grad_kernel.cu index b5427d0b73867614fb6bfc023092bf6dc07d030e..92d95e7259bf0c5cb1cf7c180eeba1d7b7ea8842 100644 --- a/paddle/phi/kernels/gpu/trunc_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/trunc_grad_kernel.cu @@ -44,7 +44,7 @@ void TruncGradKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(trunc_grad, +PD_REGISTER_KERNEL(trunc_grad, GPU, ALL_LAYOUT, phi::TruncGradKernel, diff --git a/paddle/phi/kernels/gpu/trunc_kernel.cu b/paddle/phi/kernels/gpu/trunc_kernel.cu index d9c0803de2832c1e1a9c82f289295b1a8ea60bc0..cc44602b657aabfe7a6f55ed1d0b2c06cb56fa9e 100644 --- a/paddle/phi/kernels/gpu/trunc_kernel.cu +++ b/paddle/phi/kernels/gpu/trunc_kernel.cu @@ -77,5 +77,5 @@ void TruncKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL( +PD_REGISTER_KERNEL( trunc, GPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {} diff --git a/paddle/phi/kernels/math_kernel.cc b/paddle/phi/kernels/math_kernel.cc index e1e3679ea8be89ef438b59e70889e23adc360ea0..db6c5e1ac35919c153c8021c82e747cc3ca9fe37 100644 --- a/paddle/phi/kernels/math_kernel.cc +++ b/paddle/phi/kernels/math_kernel.cc @@ -81,10 +81,10 @@ void MultiplyKernel(const Context& dev_ctx, using complex64 = ::phi::dtype::complex; using complex128 = ::phi::dtype::complex; -PT_REGISTER_KERNEL( +PD_REGISTER_KERNEL( mean, CPU, ALL_LAYOUT, phi::MeanKernel, float, double, bool) {} -PT_REGISTER_KERNEL(sum, +PD_REGISTER_KERNEL(sum, CPU, ALL_LAYOUT, phi::SumKernel, @@ -100,7 +100,7 @@ PT_REGISTER_KERNEL(sum, kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED); } -PT_REGISTER_KERNEL(add, +PD_REGISTER_KERNEL(add, CPU, ALL_LAYOUT, phi::AddKernel, @@ -111,7 +111,7 @@ PT_REGISTER_KERNEL(add, int64_t, complex64, complex128) {} -PT_REGISTER_KERNEL(subtract, +PD_REGISTER_KERNEL(subtract, CPU, ALL_LAYOUT, phi::SubtractKernel, @@ -122,7 +122,7 @@ PT_REGISTER_KERNEL(subtract, int64_t, 
complex64, complex128) {} -PT_REGISTER_KERNEL(divide, +PD_REGISTER_KERNEL(divide, CPU, ALL_LAYOUT, phi::DivideKernel, @@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(divide, int64_t, complex64, complex128) {} -PT_REGISTER_KERNEL(multiply, +PD_REGISTER_KERNEL(multiply, CPU, ALL_LAYOUT, phi::MultiplyKernel, @@ -145,7 +145,7 @@ PT_REGISTER_KERNEL(multiply, complex128) {} #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_REGISTER_KERNEL(mean, +PD_REGISTER_KERNEL(mean, GPU, ALL_LAYOUT, phi::MeanKernel, @@ -155,7 +155,7 @@ PT_REGISTER_KERNEL(mean, int, int64_t, phi::dtype::float16) {} -PT_REGISTER_KERNEL(sum, +PD_REGISTER_KERNEL(sum, GPU, ALL_LAYOUT, phi::SumKernel, @@ -170,7 +170,7 @@ PT_REGISTER_KERNEL(sum, complex128) { kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED); } -PT_REGISTER_KERNEL(add, +PD_REGISTER_KERNEL(add, GPU, ALL_LAYOUT, phi::AddKernel, @@ -182,7 +182,7 @@ PT_REGISTER_KERNEL(add, phi::dtype::float16, complex64, complex128) {} -PT_REGISTER_KERNEL(subtract, +PD_REGISTER_KERNEL(subtract, GPU, ALL_LAYOUT, phi::SubtractKernel, @@ -194,7 +194,7 @@ PT_REGISTER_KERNEL(subtract, phi::dtype::float16, complex64, complex128) {} -PT_REGISTER_KERNEL(divide, +PD_REGISTER_KERNEL(divide, GPU, ALL_LAYOUT, phi::DivideKernel, @@ -205,7 +205,7 @@ PT_REGISTER_KERNEL(divide, phi::dtype::float16, complex64, complex128) {} -PT_REGISTER_KERNEL(multiply, +PD_REGISTER_KERNEL(multiply, GPU, ALL_LAYOUT, phi::MultiplyKernel, diff --git a/paddle/phi/kernels/reshape_grad_kernel.cc b/paddle/phi/kernels/reshape_grad_kernel.cc index 436813b53e6cd363cd1b79412ca729636e9e8f1b..5361315bb611b02375da52e7bbe00e1f7ee3f4ed 100644 --- a/paddle/phi/kernels/reshape_grad_kernel.cc +++ b/paddle/phi/kernels/reshape_grad_kernel.cc @@ -37,24 +37,24 @@ void ReshapeDoubleGradKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_GENERAL_KERNEL(reshape_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_grad, CPU, ALL_LAYOUT, phi::ReshapeGradKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_double_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_double_grad, CPU, ALL_LAYOUT, phi::ReshapeDoubleGradKernel, ALL_DTYPE) {} #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_REGISTER_GENERAL_KERNEL(reshape_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_grad, GPU, ALL_LAYOUT, phi::ReshapeGradKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_double_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_double_grad, GPU, ALL_LAYOUT, phi::ReshapeDoubleGradKernel, @@ -62,12 +62,12 @@ PT_REGISTER_GENERAL_KERNEL(reshape_double_grad, #endif #ifdef PADDLE_WITH_XPU -PT_REGISTER_GENERAL_KERNEL(reshape_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_grad, XPU, ALL_LAYOUT, phi::ReshapeGradKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_double_grad, +PD_REGISTER_GENERAL_KERNEL(reshape_double_grad, XPU, ALL_LAYOUT, phi::ReshapeDoubleGradKernel, diff --git a/paddle/phi/kernels/reshape_kernel.cc b/paddle/phi/kernels/reshape_kernel.cc index 68d9130850191029c111fcfe42589af5962b60b3..570e70ea11227111a7343003d8043a3407841f19 100644 --- a/paddle/phi/kernels/reshape_kernel.cc +++ b/paddle/phi/kernels/reshape_kernel.cc @@ -52,18 +52,18 @@ void ReshapeWithXShape(const Context& dev_ctx, } // namespace phi -PT_REGISTER_GENERAL_KERNEL( +PD_REGISTER_GENERAL_KERNEL( reshape, CPU, ALL_LAYOUT, phi::ReshapeKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape, +PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape, CPU, ALL_LAYOUT, phi::ReshapeWithXShape, ALL_DTYPE) {} #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) 
-PT_REGISTER_GENERAL_KERNEL( +PD_REGISTER_GENERAL_KERNEL( reshape, GPU, ALL_LAYOUT, phi::ReshapeKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape, +PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape, GPU, ALL_LAYOUT, phi::ReshapeWithXShape, @@ -71,9 +71,9 @@ PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape, #endif #ifdef PADDLE_WITH_XPU -PT_REGISTER_GENERAL_KERNEL( +PD_REGISTER_GENERAL_KERNEL( reshape, XPU, ALL_LAYOUT, phi::ReshapeKernel, ALL_DTYPE) {} -PT_REGISTER_GENERAL_KERNEL(reshape_with_xshape, +PD_REGISTER_GENERAL_KERNEL(reshape_with_xshape, XPU, ALL_LAYOUT, phi::ReshapeWithXShape, diff --git a/paddle/phi/kernels/selected_rows/full_kernel.cc b/paddle/phi/kernels/selected_rows/full_kernel.cc index baedf899d2b53a599f4f9fbbbcf81c8368276f1f..02231867fdd35cc8db4359fa3cc31d6236229afc 100644 --- a/paddle/phi/kernels/selected_rows/full_kernel.cc +++ b/paddle/phi/kernels/selected_rows/full_kernel.cc @@ -36,7 +36,7 @@ void FullSR(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(full_sr, +PD_REGISTER_KERNEL(full_sr, CPU, ALL_LAYOUT, phi::FullSR, @@ -53,7 +53,7 @@ PT_REGISTER_KERNEL(full_sr, phi::dtype::complex) {} #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_REGISTER_KERNEL(full_sr, +PD_REGISTER_KERNEL(full_sr, GPU, ALL_LAYOUT, phi::FullSR, diff --git a/paddle/phi/kernels/selected_rows/scale_kernel.cc b/paddle/phi/kernels/selected_rows/scale_kernel.cc index 67717ed469488d7cb4765b2a31ebfacfc123b266..094b6f4d12022be07910bac68f09d201040b364a 100644 --- a/paddle/phi/kernels/selected_rows/scale_kernel.cc +++ b/paddle/phi/kernels/selected_rows/scale_kernel.cc @@ -38,7 +38,7 @@ void ScaleSR(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(scale_sr, +PD_REGISTER_KERNEL(scale_sr, CPU, ALL_LAYOUT, phi::ScaleSR, @@ -52,7 +52,7 @@ PT_REGISTER_KERNEL(scale_sr, int64_t) {} #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_REGISTER_KERNEL(scale_sr, +PD_REGISTER_KERNEL(scale_sr, GPU, ALL_LAYOUT, phi::ScaleSR, diff --git a/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc b/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc index 408240b90a98841c0565f3b19469e1e70bbe7a18..4374b5d7f1a1d9992619cffcdafcda8708e4c640 100644 --- a/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc @@ -284,7 +284,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx, } // namespace sparse } // namespace phi -PT_REGISTER_KERNEL(dense_to_sparse_coo, +PD_REGISTER_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT, phi::sparse::DenseToSparseCooKernel, @@ -297,7 +297,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_csr_to_coo, +PD_REGISTER_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT, phi::sparse::SparseCsrToCooKernel, @@ -310,7 +310,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_coo_to_csr, +PD_REGISTER_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT, phi::sparse::SparseCooToCsrKernel, @@ -323,7 +323,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr, int, int64_t) {} -PT_REGISTER_KERNEL(dense_to_sparse_csr, +PD_REGISTER_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT, phi::sparse::DenseToSparseCsrKernel, @@ -336,7 +336,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_coo_to_dense, +PD_REGISTER_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT, phi::sparse::SparseCooToDenseKernel, @@ -349,7 +349,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_csr_to_dense, 
+PD_REGISTER_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT, phi::sparse::SparseCsrToDenseKernel, diff --git a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu index ab2be13615e0e0e2b4ebc868c4b1606ae4b8cd8e..b7793e40554455075e98b12192750d862045fa82 100644 --- a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu +++ b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu @@ -553,7 +553,7 @@ void SparseCooToDenseKernel(const Context& dev_ctx, } // namespace sparse } // namespace phi -PT_REGISTER_KERNEL(dense_to_sparse_coo, +PD_REGISTER_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT, phi::sparse::DenseToSparseCooKernel, @@ -566,7 +566,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_coo, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_csr_to_coo, +PD_REGISTER_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT, phi::sparse::SparseCsrToCooKernel, @@ -579,7 +579,7 @@ PT_REGISTER_KERNEL(sparse_csr_to_coo, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_coo_to_csr, +PD_REGISTER_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT, phi::sparse::SparseCooToCsrKernel, @@ -592,7 +592,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_csr, int, int64_t) {} -PT_REGISTER_KERNEL(dense_to_sparse_csr, +PD_REGISTER_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT, phi::sparse::DenseToSparseCsrKernel, @@ -605,7 +605,7 @@ PT_REGISTER_KERNEL(dense_to_sparse_csr, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_coo_to_dense, +PD_REGISTER_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT, phi::sparse::SparseCooToDenseKernel, @@ -618,7 +618,7 @@ PT_REGISTER_KERNEL(sparse_coo_to_dense, int, int64_t) {} -PT_REGISTER_KERNEL(sparse_csr_to_dense, +PD_REGISTER_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT, phi::sparse::SparseCsrToDenseKernel, diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc index eb7146487e38b2ca9f64ab27cb420507cb190b96..c981ca115850707857ed1f25a9e546138d9d950c 100644 --- a/paddle/phi/kernels/transfer_layout_kernel.cc +++ b/paddle/phi/kernels/transfer_layout_kernel.cc @@ -69,7 +69,7 @@ void TransferLayoutKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_GENERAL_KERNEL(pten_transfer_layout, +PD_REGISTER_GENERAL_KERNEL(pten_transfer_layout, CPU, ALL_LAYOUT, phi::TransferLayoutKernel, diff --git a/paddle/phi/kernels/xpu/cast_kernel.cc b/paddle/phi/kernels/xpu/cast_kernel.cc index 0e50306a068c894b720c1a8ef1cef915999781d9..9aa503d58736defa477414df43cd812d75cfca36 100644 --- a/paddle/phi/kernels/xpu/cast_kernel.cc +++ b/paddle/phi/kernels/xpu/cast_kernel.cc @@ -86,7 +86,7 @@ void CastKernel(const Context& dev_ctx, } } // namespace phi -PT_REGISTER_KERNEL(cast, +PD_REGISTER_KERNEL(cast, XPU, ALL_LAYOUT, phi::CastKernel, diff --git a/paddle/phi/kernels/xpu/copy_kernel.cc b/paddle/phi/kernels/xpu/copy_kernel.cc index 559d110a9e8ad8eae9571dd2986e14420da70fa9..3bbedbbb346e42e55824c833244774544648ab40 100644 --- a/paddle/phi/kernels/xpu/copy_kernel.cc +++ b/paddle/phi/kernels/xpu/copy_kernel.cc @@ -69,5 +69,5 @@ void Copy(const Context& dev_ctx, } // namespace phi -PT_REGISTER_GENERAL_KERNEL( +PD_REGISTER_GENERAL_KERNEL( copy, XPU, ALL_LAYOUT, phi::Copy, ALL_DTYPE) {} diff --git a/paddle/phi/kernels/xpu/full_kernel.cc b/paddle/phi/kernels/xpu/full_kernel.cc index 98810fa9779a4afea4991ecd751f68a140d8c185..b514425cc54da2fe004fdda2319f5d3c1e488afe 100644 --- a/paddle/phi/kernels/xpu/full_kernel.cc +++ b/paddle/phi/kernels/xpu/full_kernel.cc @@ -116,7 +116,7 @@ void FullLikeKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(full, 
+PD_REGISTER_KERNEL(full, XPU, ALL_LAYOUT, phi::FullKernel, @@ -132,7 +132,7 @@ PT_REGISTER_KERNEL(full, phi::dtype::complex, phi::dtype::complex) {} -PT_REGISTER_KERNEL(full_like, +PD_REGISTER_KERNEL(full_like, XPU, ALL_LAYOUT, phi::FullLikeKernel, diff --git a/paddle/phi/kernels/xpu/scale_kernel.cc b/paddle/phi/kernels/xpu/scale_kernel.cc index 0814e2d9b322fc94d298d14adde03314e06e734b..e103e5afdcf9bea9206541ee5c94c1c3d7a87e5f 100644 --- a/paddle/phi/kernels/xpu/scale_kernel.cc +++ b/paddle/phi/kernels/xpu/scale_kernel.cc @@ -56,7 +56,7 @@ void ScaleKernel(const Context& dev_ctx, } // namespace phi -PT_REGISTER_KERNEL(scale, +PD_REGISTER_KERNEL(scale, XPU, ALL_LAYOUT, phi::ScaleKernel, diff --git a/paddle/phi/ops/compat/abs_sig.cc b/paddle/phi/ops/compat/abs_sig.cc index 67319a18aafa1d5012603ccf162a124cc0260733..b4b94457e6be9f15ffbecad64cd9189c3e2c3b08 100644 --- a/paddle/phi/ops/compat/abs_sig.cc +++ b/paddle/phi/ops/compat/abs_sig.cc @@ -32,7 +32,7 @@ KernelSignature AbsDoubleGradOpArgumentMapping( } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(abs, phi::AbsOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(abs_grad, phi::AbsGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(abs_double_grad, +PD_REGISTER_ARG_MAPPING_FN(abs, phi::AbsOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(abs_grad, phi::AbsGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(abs_double_grad, phi::AbsDoubleGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/cast_sig.cc b/paddle/phi/ops/compat/cast_sig.cc index 79cf59f32990e93b907d6965fc9749ee8cd80f1c..3d970e92a7d688e274b1ad4b45ed4a0014d7ee27 100644 --- a/paddle/phi/ops/compat/cast_sig.cc +++ b/paddle/phi/ops/compat/cast_sig.cc @@ -22,4 +22,4 @@ KernelSignature CastOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(cast, phi::CastOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(cast, phi::CastOpArgumentMapping); diff --git a/paddle/phi/ops/compat/concat_sig.cc b/paddle/phi/ops/compat/concat_sig.cc index de37b973409e94e3464fcfebc27c9096478abd8e..21e653ccfe90f8643f601324d1310452924dd1ee 100644 --- a/paddle/phi/ops/compat/concat_sig.cc +++ b/paddle/phi/ops/compat/concat_sig.cc @@ -25,4 +25,4 @@ KernelSignature ConcatOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(concat, phi::ConcatOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(concat, phi::ConcatOpArgumentMapping); diff --git a/paddle/phi/ops/compat/diagonal_sig.cc b/paddle/phi/ops/compat/diagonal_sig.cc index 430edea89bea2ab632a0b1fbcc5c14e3e4df502b..b4a424ec06bf2b018de5a0aea4d268f669685fe9 100644 --- a/paddle/phi/ops/compat/diagonal_sig.cc +++ b/paddle/phi/ops/compat/diagonal_sig.cc @@ -25,4 +25,4 @@ KernelSignature DiagonalGradOpArgumentMapping( } } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(diagonal_grad, phi::DiagonalGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(diagonal_grad, phi::DiagonalGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/digamma_sig.cc b/paddle/phi/ops/compat/digamma_sig.cc index 555c16ef6b6bf1a5044f60b1be8971c180ca2b0c..fa693f92c6fe3ade527953b632bf94cf4c1b10c1 100644 --- a/paddle/phi/ops/compat/digamma_sig.cc +++ b/paddle/phi/ops/compat/digamma_sig.cc @@ -24,4 +24,4 @@ KernelSignature DigammaGradOpArgumentMapping( } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(digamma_grad, phi::DigammaGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(digamma_grad, phi::DigammaGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/dot_sig.cc b/paddle/phi/ops/compat/dot_sig.cc 
index 481bd3a4949d8cedb2f222ecd74acd9b6b0960c1..2437ecc1ca76720007f68ddb94439f03cb291a9a 100644 --- a/paddle/phi/ops/compat/dot_sig.cc +++ b/paddle/phi/ops/compat/dot_sig.cc @@ -25,4 +25,4 @@ KernelSignature DotGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(dot_grad, phi::DotGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(dot_grad, phi::DotGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/elementwise_sig.cc b/paddle/phi/ops/compat/elementwise_sig.cc index dfffa034f1d1de8f35e683d880f518d50df02cf6..cddebcbce1273a2f88d7b1ce50f1c340d313ecf0 100644 --- a/paddle/phi/ops/compat/elementwise_sig.cc +++ b/paddle/phi/ops/compat/elementwise_sig.cc @@ -102,28 +102,28 @@ KernelSignature ElementwiseSubGradOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(elementwise_add, add); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_sub, subtract); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad); -PT_REGISTER_BASE_KERNEL_NAME(elementwise_sub_grad, subtract_grad); - -PT_REGISTER_ARG_MAPPING_FN(elementwise_add, +PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_sub, subtract); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad); +PD_REGISTER_BASE_KERNEL_NAME(elementwise_sub_grad, subtract_grad); + +PD_REGISTER_ARG_MAPPING_FN(elementwise_add, phi::ElementwiseAddOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_sub, +PD_REGISTER_ARG_MAPPING_FN(elementwise_sub, phi::ElementwiseSubOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_mul, +PD_REGISTER_ARG_MAPPING_FN(elementwise_mul, phi::ElementwiseMulOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_div, +PD_REGISTER_ARG_MAPPING_FN(elementwise_div, phi::ElementwiseDivOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_add_grad, +PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad, phi::ElementwiseAddGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_add_grad_grad, +PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad_grad, phi::ElementwiseAddDoubleGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_add_triple_grad, +PD_REGISTER_ARG_MAPPING_FN(elementwise_add_triple_grad, phi::ElementwiseAddTripleGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(elementwise_sub_grad, +PD_REGISTER_ARG_MAPPING_FN(elementwise_sub_grad, phi::ElementwiseSubGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/empty_sig.cc b/paddle/phi/ops/compat/empty_sig.cc index 9315fdf827dcf0cacac87dbff98b17ca85125993..42cd55bdc0cdab412912f9b1d5d731f18f7b6237 100644 --- a/paddle/phi/ops/compat/empty_sig.cc +++ b/paddle/phi/ops/compat/empty_sig.cc @@ -28,4 +28,4 @@ KernelSignature EmptyOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(empty, phi::EmptyOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(empty, phi::EmptyOpArgumentMapping); diff --git a/paddle/phi/ops/compat/expand_sig.cc b/paddle/phi/ops/compat/expand_sig.cc index 
3f7ff458296c7e4797f331ffd79650fc986fa054..3b2e468267da03ba97917a4899508f1fa3b9b283 100644 --- a/paddle/phi/ops/compat/expand_sig.cc +++ b/paddle/phi/ops/compat/expand_sig.cc @@ -47,8 +47,8 @@ KernelSignature ExpandGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(expand_v2, expand); -PT_REGISTER_BASE_KERNEL_NAME(expand_v2_grad, expand_grad); +PD_REGISTER_BASE_KERNEL_NAME(expand_v2, expand); +PD_REGISTER_BASE_KERNEL_NAME(expand_v2_grad, expand_grad); -PT_REGISTER_ARG_MAPPING_FN(expand_v2, phi::ExpandOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(expand_v2_grad, phi::ExpandGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(expand_v2, phi::ExpandOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(expand_v2_grad, phi::ExpandGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/fill_any_like_sig.cc b/paddle/phi/ops/compat/fill_any_like_sig.cc index 3fbd022ca6a9a9d4dd452530f8beaa6086e158eb..84af155d402d6ba8034f5e65a9b9b6e0d74ffff4 100644 --- a/paddle/phi/ops/compat/fill_any_like_sig.cc +++ b/paddle/phi/ops/compat/fill_any_like_sig.cc @@ -23,6 +23,6 @@ KernelSignature FillAnyLikeOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(fill_any_like, full_like); +PD_REGISTER_BASE_KERNEL_NAME(fill_any_like, full_like); -PT_REGISTER_ARG_MAPPING_FN(fill_any_like, phi::FillAnyLikeOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(fill_any_like, phi::FillAnyLikeOpArgumentMapping); diff --git a/paddle/phi/ops/compat/fill_constant_sig.cc b/paddle/phi/ops/compat/fill_constant_sig.cc index 85dfdc3db3eaed742e58d1c857896198c316349e..df28a7b81b61b5c424a11c7484a8b60696f47d10 100644 --- a/paddle/phi/ops/compat/fill_constant_sig.cc +++ b/paddle/phi/ops/compat/fill_constant_sig.cc @@ -123,6 +123,6 @@ KernelSignature FillConstantOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(fill_constant, full); +PD_REGISTER_BASE_KERNEL_NAME(fill_constant, full); -PT_REGISTER_ARG_MAPPING_FN(fill_constant, phi::FillConstantOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(fill_constant, phi::FillConstantOpArgumentMapping); diff --git a/paddle/phi/ops/compat/flatten_sig.cc b/paddle/phi/ops/compat/flatten_sig.cc index ae5f438cafc248d265d4c68c68e1057a2fbae487..b72ad05ea09d8d3525c5699f73103b9d40adef90 100644 --- a/paddle/phi/ops/compat/flatten_sig.cc +++ b/paddle/phi/ops/compat/flatten_sig.cc @@ -36,10 +36,10 @@ KernelSignature FlattenGradOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range, flatten); -PT_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range_grad, flatten_grad); +PD_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range, flatten); +PD_REGISTER_BASE_KERNEL_NAME(flatten_contiguous_range_grad, flatten_grad); -PT_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range, +PD_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range, phi::FlattenOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range_grad, +PD_REGISTER_ARG_MAPPING_FN(flatten_contiguous_range_grad, phi::FlattenGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/histogram_sig.cc b/paddle/phi/ops/compat/histogram_sig.cc index 0fd1fdea7642481c73310518edb726c2deddfae0..0cea146ea4e7fcde06d1bca25f57194371a8451b 100644 --- a/paddle/phi/ops/compat/histogram_sig.cc +++ b/paddle/phi/ops/compat/histogram_sig.cc @@ -22,4 +22,4 @@ KernelSignature HistogramOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(histogram, phi::HistogramOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(histogram, 
phi::HistogramOpArgumentMapping); diff --git a/paddle/phi/ops/compat/huber_loss_sig.cc b/paddle/phi/ops/compat/huber_loss_sig.cc index 6e7183ff9f281f4f953cf6faeaa8f8b1f21bf408..6f669a4a8b697a1df83429773b257014d709756c 100644 --- a/paddle/phi/ops/compat/huber_loss_sig.cc +++ b/paddle/phi/ops/compat/huber_loss_sig.cc @@ -31,6 +31,6 @@ KernelSignature HuberLossGradOpArgumentMapping( } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(huber_loss_grad, +PD_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(huber_loss_grad, phi::HuberLossGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/lerp_sig.cc b/paddle/phi/ops/compat/lerp_sig.cc index d33a714048bd00fae57d5ff4eeab8158ec6a49cb..3a8b23ca4c4a4a87f1b157679fd4e2d769deeb29 100644 --- a/paddle/phi/ops/compat/lerp_sig.cc +++ b/paddle/phi/ops/compat/lerp_sig.cc @@ -29,5 +29,5 @@ KernelSignature LerpGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(lerp, phi::LerpOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(lerp_grad, phi::LerpGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(lerp, phi::LerpOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(lerp_grad, phi::LerpGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/masked_select_sig.cc b/paddle/phi/ops/compat/masked_select_sig.cc index 77a97d103e8890eecb84c8e93e47dfde6b22ec5d..8083b123bcff53deb67db667d5166a8d1da3f95d 100644 --- a/paddle/phi/ops/compat/masked_select_sig.cc +++ b/paddle/phi/ops/compat/masked_select_sig.cc @@ -31,6 +31,6 @@ KernelSignature MaskedSelectGradOpArgumentMapping( } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(masked_select, phi::MaskedSelectOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(masked_select_grad, +PD_REGISTER_ARG_MAPPING_FN(masked_select, phi::MaskedSelectOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(masked_select_grad, phi::MaskedSelectGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/matmul_sig.cc b/paddle/phi/ops/compat/matmul_sig.cc index d4106cd39e30445482b813438241aa314e8eea46..771a7c3acc39dfade48c210d9937fbf719ad911a 100644 --- a/paddle/phi/ops/compat/matmul_sig.cc +++ b/paddle/phi/ops/compat/matmul_sig.cc @@ -49,13 +49,13 @@ KernelSignature MatmulTripleGradOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(matmul_v2, matmul); -PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad, matmul_grad); -PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad_grad, matmul_double_grad); -PT_REGISTER_BASE_KERNEL_NAME(matmul_v2_triple_grad, matmul_triple_grad); +PD_REGISTER_BASE_KERNEL_NAME(matmul_v2, matmul); +PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad, matmul_grad); +PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_grad_grad, matmul_double_grad); +PD_REGISTER_BASE_KERNEL_NAME(matmul_v2_triple_grad, matmul_triple_grad); -PT_REGISTER_ARG_MAPPING_FN(matmul_v2_grad, phi::MatmulGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(matmul_v2_grad_grad, +PD_REGISTER_ARG_MAPPING_FN(matmul_v2_grad, phi::MatmulGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(matmul_v2_grad_grad, phi::MatmulDoubleGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(matmul_v2_triple_grad, +PD_REGISTER_ARG_MAPPING_FN(matmul_v2_triple_grad, phi::MatmulTripleGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/norm_sig.cc b/paddle/phi/ops/compat/norm_sig.cc index f67c22ba712c8cb84af74063d1d55938b58d46fd..81d294b84248578d5f29b3b2d432e81f3499e9fa 100644 --- a/paddle/phi/ops/compat/norm_sig.cc +++ 
b/paddle/phi/ops/compat/norm_sig.cc @@ -30,5 +30,5 @@ KernelSignature NormGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(norm, phi::NormOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(norm_grad, phi::NormGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(norm, phi::NormOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(norm_grad, phi::NormGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc index 2d16817ad886b6a2d6df4cee1e8a3209499ab50e..74704671f8b5d244b2c3b07ada5e592a8c64da27 100644 --- a/paddle/phi/ops/compat/reduce_sig.cc +++ b/paddle/phi/ops/compat/reduce_sig.cc @@ -45,8 +45,8 @@ KernelSignature ReduceMeanOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum); -PT_REGISTER_BASE_KERNEL_NAME(reduce_mean, mean); +PD_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum); +PD_REGISTER_BASE_KERNEL_NAME(reduce_mean, mean); -PT_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping); diff --git a/paddle/phi/ops/compat/reshape_sig.cc b/paddle/phi/ops/compat/reshape_sig.cc index 8e8b7592f909adaa3de7e4357b3adf9d812704c2..b6d10dabb1c7f60432c7653461e41fd086273964 100644 --- a/paddle/phi/ops/compat/reshape_sig.cc +++ b/paddle/phi/ops/compat/reshape_sig.cc @@ -45,11 +45,11 @@ KernelSignature ReshapeDoubleGradOpArgumentMapping( } // namespace phi -PT_REGISTER_BASE_KERNEL_NAME(reshape2, reshape); -PT_REGISTER_BASE_KERNEL_NAME(reshape2_grad, reshape_grad); -PT_REGISTER_BASE_KERNEL_NAME(reshape2_grad_grad, reshape_double_grad); +PD_REGISTER_BASE_KERNEL_NAME(reshape2, reshape); +PD_REGISTER_BASE_KERNEL_NAME(reshape2_grad, reshape_grad); +PD_REGISTER_BASE_KERNEL_NAME(reshape2_grad_grad, reshape_double_grad); -PT_REGISTER_ARG_MAPPING_FN(reshape2, phi::ReshapeOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(reshape2_grad, phi::ReshapeGradOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(reshape2_grad_grad, +PD_REGISTER_ARG_MAPPING_FN(reshape2, phi::ReshapeOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(reshape2_grad, phi::ReshapeGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(reshape2_grad_grad, phi::ReshapeDoubleGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/scale_sig.cc b/paddle/phi/ops/compat/scale_sig.cc index da8d028b2e39ad31c08d320b1f1eaddff62a617a..915ea4ce302aea6a4a11f1c0745229fb09c1d8c8 100644 --- a/paddle/phi/ops/compat/scale_sig.cc +++ b/paddle/phi/ops/compat/scale_sig.cc @@ -72,4 +72,4 @@ KernelSignature ScaleOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi // op_type, api_name, arg_mapping_fn -PT_REGISTER_ARG_MAPPING_FN(scale, phi::ScaleOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(scale, phi::ScaleOpArgumentMapping); diff --git a/paddle/phi/ops/compat/split_sig.cc b/paddle/phi/ops/compat/split_sig.cc index 361a928e7539488adadefe9b170a725afa32b4fd..b3a614aab001269a58c11f331a03355de62dd5d9 100644 --- a/paddle/phi/ops/compat/split_sig.cc +++ b/paddle/phi/ops/compat/split_sig.cc @@ -46,4 +46,4 @@ KernelSignature SplitOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(split, phi::SplitOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(split, phi::SplitOpArgumentMapping); diff --git 
a/paddle/phi/ops/compat/trace_sig.cc b/paddle/phi/ops/compat/trace_sig.cc index 774ac5a944f5961ecbc8cfb1698a3806210ed98c..44fd53db98a3cf12098a676d1a2abf0bc629bb70 100644 --- a/paddle/phi/ops/compat/trace_sig.cc +++ b/paddle/phi/ops/compat/trace_sig.cc @@ -30,5 +30,5 @@ KernelSignature TraceGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(trace, phi::TraceOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(trace_grad, phi::TraceGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(trace, phi::TraceOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(trace_grad, phi::TraceGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/trunc_sig.cc b/paddle/phi/ops/compat/trunc_sig.cc index 47fa5bc47b4b50a6e3451c155b607203dcdf1c29..2d35439216da522ecc3f279814226afeb3e24948 100644 --- a/paddle/phi/ops/compat/trunc_sig.cc +++ b/paddle/phi/ops/compat/trunc_sig.cc @@ -27,5 +27,5 @@ KernelSignature TruncGradOpArgumentMapping(const ArgumentMappingContext& ctx) { } // namespace phi -PT_REGISTER_ARG_MAPPING_FN(trunc, phi::TruncOpArgumentMapping); -PT_REGISTER_ARG_MAPPING_FN(trunc_grad, phi::TruncGradOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(trunc, phi::TruncOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(trunc_grad, phi::TruncGradOpArgumentMapping); diff --git a/paddle/phi/tests/core/test_custom_kernel.cc b/paddle/phi/tests/core/test_custom_kernel.cc index b0957d80aa95e75d13667ef580a0dbafc84917ac..bc75e6ec45245eceb7f919cd96fd4e76f0af9409 100644 --- a/paddle/phi/tests/core/test_custom_kernel.cc +++ b/paddle/phi/tests/core/test_custom_kernel.cc @@ -17,6 +17,8 @@ limitations under the License. */ #define _LINUX #endif +#include + #ifdef _LINUX #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/device_context.h" @@ -30,8 +32,6 @@ limitations under the License. */ #include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/infermeta/binary.h" -#include - // user kernel function namespace custom_kernel { @@ -98,16 +98,16 @@ void FakeDot(const Context& dev_ctx, } } // namespace custom_kernel -PD_REGISTER_KERNEL(fake_dot, - CPU, - ALL_LAYOUT, - custom_kernel::FakeDot, - float, - double, - int, - int64_t, - int8_t, - uint8_t) {} +PD_REGISTER_BUILTIN_KERNEL(fake_dot, + CPU, + ALL_LAYOUT, + custom_kernel::FakeDot, + float, + double, + int, + int64_t, + int8_t, + uint8_t) {} namespace phi { namespace tests { diff --git a/paddle/phi/tests/core/test_kernel_factory.cc b/paddle/phi/tests/core/test_kernel_factory.cc index c85485cb91513613077af1ae1fefaa3c491272f9..cb4b50f5b6c3dce52f5d188b86d748a59cd41f1e 100644 --- a/paddle/phi/tests/core/test_kernel_factory.cc +++ b/paddle/phi/tests/core/test_kernel_factory.cc @@ -22,7 +22,7 @@ limitations under the License. */ #include "gtest/gtest.h" -PT_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT); namespace phi { namespace tests { @@ -76,7 +76,7 @@ TEST(KernelRegistry, SetFP32Input) { } // namespace tests } // namespace phi -PT_REGISTER_KERNEL(test, +PD_REGISTER_KERNEL(test, CPU, ALL_LAYOUT, phi::tests::TestKernel, diff --git a/paddle/phi/tests/kernels/test_flatten_dev_api.cc b/paddle/phi/tests/kernels/test_flatten_dev_api.cc index b65720a4b4e241020df0fc4360814f7ec10a2aaf..dc283728ee5f761e79c9c396d63121d555139dee 100644 --- a/paddle/phi/tests/kernels/test_flatten_dev_api.cc +++ b/paddle/phi/tests/kernels/test_flatten_dev_api.cc @@ -23,14 +23,14 @@ limitations under the License. 
*/ #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" -PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT); #endif #ifdef PADDLE_WITH_XPU -PT_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(copy, XPU, ALL_LAYOUT); #endif namespace phi { diff --git a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc index 68393cba57e368950a98cd7eead105207ea6b70e..b0519138ca54042e510bef87fdb49e125f11eca4 100644 --- a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc +++ b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot.cc @@ -45,7 +45,7 @@ void DotKernel(const Context& dev_ctx, } // namespace custom_kernel } // namespace paddle -PD_REGISTER_KERNEL( +PD_REGISTER_BUILTIN_KERNEL( dot, CPU, ALL_LAYOUT, paddle::custom_kernel::DotKernel, int8_t) { kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::INT8); } diff --git a/python/paddle/utils/code_gen/api_gen.py b/python/paddle/utils/code_gen/api_gen.py index 106f698fd4b1e659c51f2ed09537a647e82d34d5..f1e69a21f28d86e424bd2bb91732e29c7a2971d9 100644 --- a/python/paddle/utils/code_gen/api_gen.py +++ b/python/paddle/utils/code_gen/api_gen.py @@ -118,7 +118,7 @@ def source_include(header_file_path): def api_register(): return """ -PT_REGISTER_API(Math); +PD_REGISTER_API(Math); """ diff --git a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py index 53270c0546eae1f13d67f6f640c97d83e5be60d4..0d018f8e3f64fc2f9a89e78d81d3a392e799b441 100644 --- a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py +++ b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py @@ -26,7 +26,7 @@ def get_wrapped_infermeta_name(api_name): def gene_wrapped_infermeta_and_register(api): if api.is_base_api and not api.is_dygraph_api: register_code = f""" -PT_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']});""" +PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']});""" if api.infer_meta['param'] is not None: kernel_params = api.kernel['param'] @@ -73,7 +73,7 @@ void {wrapped_infermeta_name}({", ".join(args)}) {{ """ register_code = f""" -PT_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{get_wrapped_infermeta_name(api.kernel['func'][0])});""" +PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{get_wrapped_infermeta_name(api.kernel['func'][0])});""" return declare_code, defind_code, register_code else: diff --git a/tools/infrt/get_pten_kernel_function.sh b/tools/infrt/get_pten_kernel_function.sh index 75009b077b823a93d939757ae9af592dd938df53..488c5b4c4328d123f2118d175359d3dab38b4626 100644 --- a/tools/infrt/get_pten_kernel_function.sh +++ b/tools/infrt/get_pten_kernel_function.sh @@ -24,9 +24,9 @@ set -e kernel_register_info_file=`mktemp` PADDLE_ROOT="$( cd "$( dirname "$0" )/../../" && pwd )" unset GREP_OPTIONS && find ${PADDLE_ROOT}/paddle/phi/kernels -name "*.c*" \ - | xargs sed -e '/PT_REGISTER_\(GENERAL_\)\?KERNEL(/,/)/!d' \ + | xargs sed -e '/PD_REGISTER_\(GENERAL_\)\?KERNEL(/,/)/!d' \ | awk 'BEGIN { RS="{" }{ gsub(/\n /,""); print $0 }' \ - | grep PT_REGISTER \ + | grep PD_REGISTER \ | awk -F ",|\(|\)" '{gsub(/ /,"");$1="";print}' \ | sort -u | awk '{gsub(/phi::/,"");gsub(/paddle::platform::/,"");gsub(/dtype::/,"");gsub(/paddle::/,"");print $0}' 
\ | grep -v "_grad" > $kernel_register_info_file @@ -38,7 +38,7 @@ python3 ${PADDLE_ROOT}/python/paddle/utils/code_gen/wrapped_infermeta_gen.py \ --wrapped_infermeta_header_path ${temp_path}/generate.h \ --wrapped_infermeta_source_path ${temp_path}/generate.cc -grep PT_REGISTER_INFER_META_FN ${temp_path}/generate.cc \ +grep PD_REGISTER_INFER_META_FN ${temp_path}/generate.cc \ | awk -F "\(|,|::|\)" '{print $2, $4}' > ${temp_path}/wrap_info.txt #step 3: merge all infos