diff --git a/paddle/fluid/pybind/kernel_signature_generator.cc b/paddle/fluid/pybind/kernel_signature_generator.cc
index 0b0a8628b14f1c64e84d82baf743d22d823d7b19..1156fd0b52c25393fac6abaa4f9d2dffe8b3779f 100644
--- a/paddle/fluid/pybind/kernel_signature_generator.cc
+++ b/paddle/fluid/pybind/kernel_signature_generator.cc
@@ -48,7 +48,7 @@ int main(int argc, char **argv) {
   for (const auto &op_kernel_pair : kernel_factory.kernels()) {
     std::string op_name = op_kernel_pair.first;
     const paddle::flat_hash_map<std::string, std::string> &kernel_name_map =
-        phi::OpUtilsMap::Instance().base_kernel_name_map();
+        phi::OpUtilsMap::Instance().fluid_op_to_phi_kernel();
     for (auto &it : kernel_name_map) {
       if (it.second == op_name) {
         op_name = it.first;
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 98beaa776336f8c08117a283eba776ca1d69af48..22968a08be9d48fae31764a7cbba321d90d7344c 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -633,7 +633,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_swish_grad
+    func : hardswish_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : hardtanh_grad
@@ -644,7 +644,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_tanh_grad
+    func : hardtanh_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : hsigmoid_loss_grad
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 5f7bc550083c02b9a3c40763d3d4ecdf0042409f..5592ab325a9f668e7d64668f9fba10df053768a2 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -171,7 +171,7 @@
   infer_meta :
     func : ArgMinMaxInferMeta
   kernel :
-    func : arg_max
+    func : argmax
 
 - op : argmin
   args : (Tensor x, Scalar axis, bool keepdims, bool flatten, int dtype)
@@ -179,7 +179,7 @@
   infer_meta :
     func : ArgMinMaxInferMeta
   kernel :
-    func : arg_min
+    func : argmin
 
 - op : assign
   args : (Tensor x)
@@ -914,7 +914,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_swish
+    func : hardswish
   backward : hardswish_grad
 
 - op : hardtanh
@@ -924,7 +924,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_tanh
+    func : hardtanh
   backward : hardtanh_grad
 
 - op : hsigmoid_loss
diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc
index 4a807801e3245223c6ad74c3d8eb0ca364879797..03fb3f3e6b6b668a719a3c0af4976c26a826c2d2 100644
--- a/paddle/phi/core/compat/convert_utils.cc
+++ b/paddle/phi/core/compat/convert_utils.cc
@@ -110,14 +110,11 @@ const std::string& TransToPhiKernelName(const std::string& fluid_op_name) {
 }
 
 const std::string& TransToFluidOpName(const std::string& phi_kernel_name) {
-  auto& base_kernel_name_map = OpUtilsMap::Instance().base_kernel_name_map();
-  auto it = std::find_if(base_kernel_name_map.begin(),
-                         base_kernel_name_map.end(),
-                         [&phi_kernel_name](const auto& pair) {
-                           return pair.second == phi_kernel_name;
-                         });
-  if (it != base_kernel_name_map.end()) {
-    return it->first;
+  const auto& phi_kernel_to_fluid_op =
+      OpUtilsMap::Instance().phi_kernel_to_fluid_op();
+  auto it = phi_kernel_to_fluid_op.find(phi_kernel_name);
+  if (it != phi_kernel_to_fluid_op.end()) {
+    return it->second;
   }
   return phi_kernel_name;
 }
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 9f62bffb7ecd1ee69079a039dc54b284d2192c99..1017aa3341e8808d4496daf40e5f3d8e045ddd59 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -131,18 +131,23 @@ class OpUtilsMap {
   static OpUtilsMap& Instance();
 
   bool Contains(const std::string& op_type) const {
-    return base_kernel_name_map_.count(op_type) ||
+    return fluid_op_to_phi_kernel_.count(op_type) ||
           arg_mapping_fn_map_.count(op_type);
  }
 
-  void InsertBaseKernelName(std::string op_type, std::string base_kernel_name) {
+  void InsertBaseKernelName(const std::string& op_type,
+                            const std::string& base_kernel_name) {
+    fluid_op_to_phi_kernel_.insert({op_type, base_kernel_name});
+  }
+
+  void InsertFluidOplName(std::string op_type, std::string base_kernel_name) {
     PADDLE_ENFORCE_EQ(
-        base_kernel_name_map_.count(op_type),
+        phi_kernel_to_fluid_op_.count(base_kernel_name),
         0UL,
         phi::errors::AlreadyExists(
-            "Operator (%s)'s api name has been registered.", op_type));
-    base_kernel_name_map_.insert(
-        {std::move(op_type), std::move(base_kernel_name)});
+            "Operator (%s)'s kernel name (%s) has been registered.",
+            op_type,
+            base_kernel_name));
+    phi_kernel_to_fluid_op_.insert({base_kernel_name, op_type});
   }
 
   bool HasArgumentMappingFn(const std::string& op_type) const {
@@ -163,8 +168,8 @@ class OpUtilsMap {
     if (deprecated_op_names.find(op_type) != deprecated_op_names.end()) {
       return deprecated_kernel_name;
     }
-    auto it = base_kernel_name_map_.find(op_type);
-    if (it == base_kernel_name_map_.end()) {
+    auto it = fluid_op_to_phi_kernel_.find(op_type);
+    if (it == fluid_op_to_phi_kernel_.end()) {
       return op_type;
     } else {
       return it->second;
@@ -181,15 +186,23 @@ class OpUtilsMap {
     }
   }
 
-  const paddle::flat_hash_map<std::string, std::string>& base_kernel_name_map()
-      const {
-    return base_kernel_name_map_;
+  const paddle::flat_hash_map<std::string, std::string>&
+  fluid_op_to_phi_kernel() const {
+    return fluid_op_to_phi_kernel_;
+  }
+
+  const paddle::flat_hash_map<std::string, std::string>&
+  phi_kernel_to_fluid_op() const {
+    return phi_kernel_to_fluid_op_;
   }
 
  private:
   OpUtilsMap() = default;
 
-  paddle::flat_hash_map<std::string, std::string> base_kernel_name_map_;
+  paddle::flat_hash_map<std::string, std::string> fluid_op_to_phi_kernel_;
+
+  paddle::flat_hash_map<std::string, std::string> phi_kernel_to_fluid_op_;
+
   paddle::flat_hash_map<std::string, ArgumentMappingFn> arg_mapping_fn_map_;
 
   DISABLE_COPY_AND_ASSIGN(OpUtilsMap);
@@ -198,6 +211,7 @@ class OpUtilsMap {
 struct BaseKernelNameRegistrar {
   BaseKernelNameRegistrar(const char* op_type, const char* base_kernel_name) {
     OpUtilsMap::Instance().InsertBaseKernelName(op_type, base_kernel_name);
+    OpUtilsMap::Instance().InsertFluidOplName(op_type, base_kernel_name);
   }
 };
 
diff --git a/paddle/phi/kernels/activation_kernel.cc b/paddle/phi/kernels/activation_kernel.cc
index 26ce10324636fd1c1be607f1a6ef7410d970979e..3de8a867fd600c424ef31f07dd165f637a99a5dc 100644
--- a/paddle/phi/kernels/activation_kernel.cc
+++ b/paddle/phi/kernels/activation_kernel.cc
@@ -45,12 +45,12 @@ using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
 
 PD_REGISTER_KERNEL(
-    hard_swish, CPU, ALL_LAYOUT, phi::HardSwishKernel, float, double) {}
+    hardswish, CPU, ALL_LAYOUT, phi::HardSwishKernel, float, double) {}
 PD_REGISTER_KERNEL(relu6, CPU, ALL_LAYOUT, phi::Relu6Kernel, float, double) {}
 PD_REGISTER_KERNEL(swish, CPU, ALL_LAYOUT, phi::SwishKernel, float, double) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(hard_swish,
+PD_REGISTER_KERNEL(hardswish,
                    GPU,
                    ALL_LAYOUT,
                    phi::HardSwishKernel,
@@ -80,13 +80,13 @@ PD_REGISTER_KERNEL(swish,
 #endif
 
 #if defined PADDLE_WITH_XPU
-PD_REGISTER_KERNEL(hard_swish, XPU, ALL_LAYOUT, phi::HardSwishKernel, float) {}
+PD_REGISTER_KERNEL(hardswish, XPU, ALL_LAYOUT, phi::HardSwishKernel, float) {}
 PD_REGISTER_KERNEL(relu6, XPU, ALL_LAYOUT, phi::Relu6Kernel, float) {}
 PD_REGISTER_KERNEL(swish, XPU, ALL_LAYOUT, phi::SwishKernel, float) {}
 #endif
 
 #ifdef PADDLE_WITH_MKLDNN
-PD_REGISTER_KERNEL(hard_swish,
+PD_REGISTER_KERNEL(hardswish,
                    OneDNN,
                    ONEDNN,
                    phi::HardSwishKernel,
diff --git a/paddle/phi/kernels/cpu/activation_grad_kernel.cc b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
index 06485e847d6ada977e44ff9d81cb4dcda34da0b2..128336d6a5e60ca981ae5185838ca590ae8b2810 100644
--- a/paddle/phi/kernels/cpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
@@ -263,7 +263,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(asinh_grad, AsinhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(acosh_grad, AcoshGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(atanh_grad, AtanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_grad, TanhGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_tanh_grad, HardTanhGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardtanh_grad, HardTanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(thresholded_relu_grad,
                                    ThresholdedReluGradKernel)
@@ -388,7 +388,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(log2_grad, Log2GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log10_grad, Log10GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log1p_grad, Log1pGradKernel)
 PD_REGISTER_ACTIVATION_DOUBLE_GRAD_KERNEL(log_double_grad, LogDoubleGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(swish_grad, SwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(round_grad, RoundGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(floor_grad, FloorGradKernel)
diff --git a/paddle/phi/kernels/cpu/activation_kernel.cc b/paddle/phi/kernels/cpu/activation_kernel.cc
index f3905c1f805af4dcb18ad7605f53d2816fbbf37e..70b011eafe5a6b4efd50ef371025e15d0e85ea44 100644
--- a/paddle/phi/kernels/cpu/activation_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_kernel.cc
@@ -146,7 +146,7 @@ PD_REGISTER_ACTIVATION_KERNEL(asinh, AsinhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(acosh, AcoshKernel)
 PD_REGISTER_ACTIVATION_KERNEL(atanh, AtanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_tanh, HardTanhKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardtanh, HardTanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
@@ -183,7 +183,7 @@ PD_REGISTER_ACTIVATION_KERNEL(log2, Log2Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log10, Log10Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log1p, Log1pKernel)
 PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
 PD_REGISTER_ACTIVATION_KERNEL(ceil, CeilKernel)
diff --git a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
index 5c7e79c9e8adee32f4425282e01c4587d9b5bd13..61d20ac32f15af65dd97ce80b491b1ce5e7888ae 100644
--- a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
@@ -180,7 +180,7 @@ void ArgMaxKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(arg_min,
+PD_REGISTER_KERNEL(argmin,
                    CPU,
                    ALL_LAYOUT,
                    phi::ArgMinKernel,
@@ -191,7 +191,7 @@ PD_REGISTER_KERNEL(arg_min,
                    int16_t,
                    uint8_t) {}
 
-PD_REGISTER_KERNEL(arg_max,
+PD_REGISTER_KERNEL(argmax,
                    CPU,
                    ALL_LAYOUT,
                    phi::ArgMaxKernel,
diff --git a/paddle/phi/kernels/gpu/activation_grad_kernel.cu b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
index 2c2ca16e2623f59fceea48bb9b81f495f535b07f..0c8c8b43a0bac9034abff5393a2bef0739bdeb7e 100644
--- a/paddle/phi/kernels/gpu/activation_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
@@ -347,7 +347,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(atanh_grad, AtanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_grad, TanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_double_grad, TanhDoubleGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_triple_grad, TanhTripleGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_tanh_grad, HardTanhGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardtanh_grad, HardTanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_double_grad,
                                    LeakyReluDoubleGradKernel)
@@ -474,7 +474,7 @@ PD_REGISTER_KERNEL(log_double_grad,
                    float,
                    double,
                    phi::dtype::float16) {}
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(swish_grad, SwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(round_grad, RoundGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(floor_grad, FloorGradKernel)
diff --git a/paddle/phi/kernels/gpu/activation_kernel.cu b/paddle/phi/kernels/gpu/activation_kernel.cu
index 5168a1de07335d37b9c43894663a5a1fd4f1d095..271ad6107bce408d0896bf5523465f72d36cd964 100644
--- a/paddle/phi/kernels/gpu/activation_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_kernel.cu
@@ -196,7 +196,7 @@ PD_REGISTER_ACTIVATION_KERNEL(asinh, AsinhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(acosh, AcoshKernel)
 PD_REGISTER_ACTIVATION_KERNEL(atanh, AtanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_tanh, HardTanhKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardtanh, HardTanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
@@ -254,7 +254,7 @@ PD_REGISTER_ACTIVATION_KERNEL(log, LogKernel)
 PD_REGISTER_ACTIVATION_KERNEL(log2, Log2Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log10, Log10Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log1p, Log1pKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
diff --git a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
index 74be557c7d667bcc900cdc8eae6de889383492f8..affd36a95ef8bc56c4ecd3225b8f20ffdf3f2485 100644
--- a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
+++ b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
@@ -248,7 +248,7 @@ void ArgMaxKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(arg_min,
+PD_REGISTER_KERNEL(argmin,
                    GPU,
                    ALL_LAYOUT,
                    phi::ArgMinKernel,
@@ -261,7 +261,7 @@ PD_REGISTER_KERNEL(arg_min,
                    int16_t,
                    uint8_t) {}
 
-PD_REGISTER_KERNEL(arg_max,
+PD_REGISTER_KERNEL(argmax,
                    GPU,
                    ALL_LAYOUT,
                    phi::ArgMaxKernel,
diff --git a/paddle/phi/kernels/onednn/activation_grad_kernel.cc b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
index 1ebe9f20c63ea2afde7786c04479e0f6476b04c2..489f53da760324e91b1d8f8448cb70b24ecaca20 100644
--- a/paddle/phi/kernels/onednn/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
@@ -273,7 +273,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(abs_grad, AbsGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_grad, EluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(gelu_grad, GeluGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(mish_grad, MishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(relu6_grad, Relu6GradKernel)
diff --git a/paddle/phi/kernels/onednn/activation_kernel.cc b/paddle/phi/kernels/onednn/activation_kernel.cc
index c6367c826cfcfe6e0eac16224519af452f56ad2f..75bc13556398bd829806040d845f779212abca4c 100644
--- a/paddle/phi/kernels/onednn/activation_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_kernel.cc
@@ -202,7 +202,7 @@ PD_REGISTER_ACTIVATION_KERNEL(abs, AbsKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)
 PD_REGISTER_ACTIVATION_KERNEL(gelu, GeluKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu, ReluKernel)
diff --git a/paddle/phi/kernels/xpu/activation_grad_kernel.cc b/paddle/phi/kernels/xpu/activation_grad_kernel.cc
index e3b5e1bfcd3fd0d20fc98d870a517b1e98a634ca..9585e2264db6762fcfb8912419312ec4d0ff50c8 100644
--- a/paddle/phi/kernels/xpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/activation_grad_kernel.cc
@@ -617,7 +617,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log_grad, LogGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_sigmoid_grad, HardSigmoidGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(reciprocal_grad, ReciprocalGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(relu6_grad, Relu6GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
diff --git a/paddle/phi/kernels/xpu/activation_kernel.cc b/paddle/phi/kernels/xpu/activation_kernel.cc
index 2425f304a3a98c150f0dbed90430d5e0918c28db..0d41afeeacc2a61821a17181de38cd2ef345d698 100644
--- a/paddle/phi/kernels/xpu/activation_kernel.cc
+++ b/paddle/phi/kernels/xpu/activation_kernel.cc
@@ -486,7 +486,7 @@ PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)  // no grad
 PD_REGISTER_ACTIVATION_KERNEL(log, LogKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(pow, PowKernel)
 PD_REGISTER_ACTIVATION_KERNEL(reciprocal, ReciprocalKernel)
diff --git a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
index b3a73931723ac5fd9377d5297368ea4df54fcfde..3513b64bc600ebfe565dbb1bc69a3b25ac72c21c 100644
--- a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
@@ -65,4 +65,4 @@ void ArgMaxKernel(const Context& dev_ctx,
                                      XPUAPIErrorMsg[r]));
   }
 }  // namespace phi
-PD_REGISTER_KERNEL(arg_max, XPU, ALL_LAYOUT, phi::ArgMaxKernel, float) {}
+PD_REGISTER_KERNEL(argmax, XPU, ALL_LAYOUT, phi::ArgMaxKernel, float) {}
diff --git a/paddle/phi/ops/compat/activation_sig.cc b/paddle/phi/ops/compat/activation_sig.cc
index da61faf543e1f29dd0905d1c1827af0b36987e5e..e40bb6bc3d9d002a859ec991db23319a2c5ef9aa 100644
--- a/paddle/phi/ops/compat/activation_sig.cc
+++ b/paddle/phi/ops/compat/activation_sig.cc
@@ -39,10 +39,10 @@ namespace phi {
 
 #define comma ,
 
-DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardTanh, "hard_tanh", "t_min" comma "t_max");
+DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardTanh, "hardtanh", "t_min" comma "t_max");
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Mish, "mish", "threshold");
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardSwish,
-                               "hard_swish",
+                               "hardswish",
                                "threshold" comma "scale" comma
                                    "offset");  // NOLINT
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Swish, "swish", "beta");  // NOLINT
@@ -55,7 +55,7 @@ DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Relu6, "relu6", "threshold");  // NOLINT
 
 KernelSignature HardSwishOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "hard_swish_raw", {"X"}, {"threshold", "scale", "offset"}, {"Out"});
+      "hardswish_raw", {"X"}, {"threshold", "scale", "offset"}, {"Out"});
 }
 
 KernelSignature SwishOpArgumentMapping(const ArgumentMappingContext& ctx) {
@@ -113,8 +113,10 @@ KernelSignature PowTripleGradOpArgumentMapping(
 }
 }  // namespace phi
 
-PD_REGISTER_BASE_KERNEL_NAME(brelu, hard_tanh);
-PD_REGISTER_BASE_KERNEL_NAME(brelu_grad, hard_tanh_grad);
+PD_REGISTER_BASE_KERNEL_NAME(brelu, hardtanh);
+PD_REGISTER_BASE_KERNEL_NAME(brelu_grad, hardtanh_grad);
+PD_REGISTER_BASE_KERNEL_NAME(hard_swish, hardswish);
+PD_REGISTER_BASE_KERNEL_NAME(hard_swish_grad, hardswish_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(mish_grad, phi::MishGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(stanh_grad, phi::STanhGradOpArgumentMapping);
diff --git a/paddle/phi/ops/compat/arg_min_max_sig.cc b/paddle/phi/ops/compat/arg_min_max_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4739165c8299cb7a97038e6078e8f5d267d3689d
--- /dev/null
+++ b/paddle/phi/ops/compat/arg_min_max_sig.cc
@@ -0,0 +1,18 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+PD_REGISTER_BASE_KERNEL_NAME(arg_max, argmax);
+PD_REGISTER_BASE_KERNEL_NAME(arg_min, argmin);
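
Aside: a minimal standalone sketch of the data-structure change in op_utils.h above. The patch keeps two hash maps (fluid_op_to_phi_kernel_ and phi_kernel_to_fluid_op_) so that the reverse lookup in TransToFluidOpName becomes a direct find() instead of a linear find_if scan over pairs. This is illustrative only: the NameMap class and its method names are hypothetical, and std::unordered_map stands in for paddle::flat_hash_map.

#include <iostream>
#include <string>
#include <unordered_map>

class NameMap {
 public:
  // Register both directions at once, mirroring how BaseKernelNameRegistrar
  // now calls InsertBaseKernelName and then InsertFluidOplName in the patch.
  void Insert(const std::string& fluid_op, const std::string& phi_kernel) {
    fluid_op_to_phi_kernel_.emplace(fluid_op, phi_kernel);
    phi_kernel_to_fluid_op_.emplace(phi_kernel, fluid_op);
  }

  // Forward lookup (cf. TransToPhiKernelName): fluid op -> phi kernel name.
  const std::string& ToPhi(const std::string& fluid_op) const {
    auto it = fluid_op_to_phi_kernel_.find(fluid_op);
    return it == fluid_op_to_phi_kernel_.end() ? fluid_op : it->second;
  }

  // Reverse lookup (cf. TransToFluidOpName): phi kernel name -> fluid op.
  // O(1) average, versus the O(n) find_if scan the patch removes.
  const std::string& ToFluid(const std::string& phi_kernel) const {
    auto it = phi_kernel_to_fluid_op_.find(phi_kernel);
    return it == phi_kernel_to_fluid_op_.end() ? phi_kernel : it->second;
  }

 private:
  std::unordered_map<std::string, std::string> fluid_op_to_phi_kernel_;
  std::unordered_map<std::string, std::string> phi_kernel_to_fluid_op_;
};

int main() {
  NameMap m;
  // Same pairs the patch registers via PD_REGISTER_BASE_KERNEL_NAME.
  m.Insert("hard_swish", "hardswish");
  m.Insert("arg_max", "argmax");
  std::cout << m.ToPhi("hard_swish") << "\n";  // prints: hardswish
  std::cout << m.ToFluid("argmax") << "\n";    // prints: arg_max
  std::cout << m.ToFluid("relu") << "\n";      // unmapped names pass through: relu
  return 0;
}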