From d3f52efd584e08bb5213b52dd3c12b9efa318c9e Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Mon, 28 Nov 2022 11:11:12 +0800
Subject: [PATCH] Fix bug of TransToFluidOpName (#48355)

* add fluid_op_name_map

* rename some kernel name

* add comments for op-kernel map

* refine map name of op to kernel
---
 .../pybind/kernel_signature_generator.cc      |  2 +-
 paddle/phi/api/yaml/legacy_backward.yaml      |  4 +-
 paddle/phi/api/yaml/legacy_ops.yaml           |  8 ++--
 paddle/phi/core/compat/convert_utils.cc       | 13 +++----
 paddle/phi/core/compat/op_utils.h             | 38 +++++++++++++------
 paddle/phi/kernels/activation_kernel.cc       |  8 ++--
 .../phi/kernels/cpu/activation_grad_kernel.cc |  4 +-
 paddle/phi/kernels/cpu/activation_kernel.cc   |  4 +-
 paddle/phi/kernels/cpu/arg_min_max_kernel.cc  |  4 +-
 .../phi/kernels/gpu/activation_grad_kernel.cu |  4 +-
 paddle/phi/kernels/gpu/activation_kernel.cu   |  4 +-
 paddle/phi/kernels/gpu/arg_min_max_kernel.cu  |  4 +-
 .../kernels/onednn/activation_grad_kernel.cc  |  2 +-
 .../phi/kernels/onednn/activation_kernel.cc   |  2 +-
 .../phi/kernels/xpu/activation_grad_kernel.cc |  2 +-
 paddle/phi/kernels/xpu/activation_kernel.cc   |  2 +-
 paddle/phi/kernels/xpu/arg_min_max_kernel.cc  |  2 +-
 paddle/phi/ops/compat/activation_sig.cc       | 12 +++---
 paddle/phi/ops/compat/arg_min_max_sig.cc      | 18 +++++++++
 19 files changed, 84 insertions(+), 53 deletions(-)
 create mode 100644 paddle/phi/ops/compat/arg_min_max_sig.cc

diff --git a/paddle/fluid/pybind/kernel_signature_generator.cc b/paddle/fluid/pybind/kernel_signature_generator.cc
index 0b0a8628b1..1156fd0b52 100644
--- a/paddle/fluid/pybind/kernel_signature_generator.cc
+++ b/paddle/fluid/pybind/kernel_signature_generator.cc
@@ -48,7 +48,7 @@ int main(int argc, char **argv) {
   for (const auto &op_kernel_pair : kernel_factory.kernels()) {
     std::string op_name = op_kernel_pair.first;
     const paddle::flat_hash_map<std::string, std::string> &kernel_name_map =
-        phi::OpUtilsMap::Instance().base_kernel_name_map();
+        phi::OpUtilsMap::Instance().fluid_op_to_phi_kernel();
     for (auto &it : kernel_name_map) {
       if (it.second == op_name) {
         op_name = it.first;
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 98beaa7763..22968a08be 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -633,7 +633,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_swish_grad
+    func : hardswish_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : hardtanh_grad
@@ -644,7 +644,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_tanh_grad
+    func : hardtanh_grad
   inplace : (out_grad -> x_grad)
 
 - backward_op : hsigmoid_loss_grad
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 5f7bc55008..5592ab325a 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -171,7 +171,7 @@
   infer_meta :
     func : ArgMinMaxInferMeta
   kernel :
-    func : arg_max
+    func : argmax
 
 - op : argmin
   args : (Tensor x, Scalar axis, bool keepdims, bool flatten, int dtype)
@@ -179,7 +179,7 @@
   infer_meta :
     func : ArgMinMaxInferMeta
   kernel :
-    func : arg_min
+    func : argmin
 
 - op : assign
   args : (Tensor x)
@@ -914,7 +914,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_swish
+    func : hardswish
   backward : hardswish_grad
 
 - op : hardtanh
@@ -924,7 +924,7 @@
     func : UnchangedInferMeta
     param : [x]
   kernel :
-    func : hard_tanh
+    func : hardtanh
   backward : hardtanh_grad
 
 - op : hsigmoid_loss
diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc
index 4a807801e3..03fb3f3e6b 100644
--- a/paddle/phi/core/compat/convert_utils.cc
+++ b/paddle/phi/core/compat/convert_utils.cc
@@ -110,14 +110,11 @@ const std::string& TransToPhiKernelName(const std::string& fluid_op_name) {
 }
 
 const std::string& TransToFluidOpName(const std::string& phi_kernel_name) {
-  auto& base_kernel_name_map = OpUtilsMap::Instance().base_kernel_name_map();
-  auto it = std::find_if(base_kernel_name_map.begin(),
-                         base_kernel_name_map.end(),
-                         [&phi_kernel_name](const auto& pair) {
-                           return pair.second == phi_kernel_name;
-                         });
-  if (it != base_kernel_name_map.end()) {
-    return it->first;
+  const auto& phi_kernel_to_fluid_op =
+      OpUtilsMap::Instance().phi_kernel_to_fluid_op();
+  auto it = phi_kernel_to_fluid_op.find(phi_kernel_name);
+  if (it != phi_kernel_to_fluid_op.end()) {
+    return it->second;
   }
   return phi_kernel_name;
 }
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 9f62bffb7e..1017aa3341 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -131,18 +131,23 @@ class OpUtilsMap {
   static OpUtilsMap& Instance();
 
   bool Contains(const std::string& op_type) const {
-    return base_kernel_name_map_.count(op_type) ||
+    return fluid_op_to_phi_kernel_.count(op_type) ||
            arg_mapping_fn_map_.count(op_type);
   }
 
-  void InsertBaseKernelName(std::string op_type, std::string base_kernel_name) {
+  void InsertBaseKernelName(const std::string& op_type,
+                            const std::string& base_kernel_name) {
+    fluid_op_to_phi_kernel_.insert({op_type, base_kernel_name});
+  }
+
+  void InsertFluidOplName(std::string op_type, std::string base_kernel_name) {
     PADDLE_ENFORCE_EQ(
-        base_kernel_name_map_.count(op_type),
+        phi_kernel_to_fluid_op_.count(base_kernel_name),
         0UL,
         phi::errors::AlreadyExists(
-            "Operator (%s)'s api name has been registered.", op_type));
-    base_kernel_name_map_.insert(
-        {std::move(op_type), std::move(base_kernel_name)});
+            "Operator (%s)'s kernel name (%s) has been registered.",
+            op_type,
+            base_kernel_name));
+    phi_kernel_to_fluid_op_.insert({base_kernel_name, op_type});
   }
 
   bool HasArgumentMappingFn(const std::string& op_type) const {
@@ -163,8 +168,8 @@ class OpUtilsMap {
     if (deprecated_op_names.find(op_type) != deprecated_op_names.end()) {
       return deprecated_kernel_name;
     }
-    auto it = base_kernel_name_map_.find(op_type);
-    if (it == base_kernel_name_map_.end()) {
+    auto it = fluid_op_to_phi_kernel_.find(op_type);
+    if (it == fluid_op_to_phi_kernel_.end()) {
       return op_type;
     } else {
       return it->second;
@@ -181,15 +186,23 @@ class OpUtilsMap {
     }
   }
 
-  const paddle::flat_hash_map<std::string, std::string>& base_kernel_name_map()
-      const {
-    return base_kernel_name_map_;
+  const paddle::flat_hash_map<std::string, std::string>&
+  fluid_op_to_phi_kernel() const {
+    return fluid_op_to_phi_kernel_;
+  }
+
+  const paddle::flat_hash_map<std::string, std::string>&
+  phi_kernel_to_fluid_op() const {
+    return phi_kernel_to_fluid_op_;
   }
 
  private:
  OpUtilsMap() = default;
 
-  paddle::flat_hash_map<std::string, std::string> base_kernel_name_map_;
+  paddle::flat_hash_map<std::string, std::string> fluid_op_to_phi_kernel_;
+
+  paddle::flat_hash_map<std::string, std::string> phi_kernel_to_fluid_op_;
 
   paddle::flat_hash_map<std::string, ArgumentMappingFn> arg_mapping_fn_map_;
 
   DISABLE_COPY_AND_ASSIGN(OpUtilsMap);
@@ -198,6 +211,7 @@ class OpUtilsMap {
 struct BaseKernelNameRegistrar {
   BaseKernelNameRegistrar(const char* op_type, const char* base_kernel_name) {
     OpUtilsMap::Instance().InsertBaseKernelName(op_type, base_kernel_name);
+    OpUtilsMap::Instance().InsertFluidOplName(op_type, base_kernel_name);
   }
 };
diff --git a/paddle/phi/kernels/activation_kernel.cc b/paddle/phi/kernels/activation_kernel.cc
index 26ce103246..3de8a867fd 100644
--- a/paddle/phi/kernels/activation_kernel.cc
+++ b/paddle/phi/kernels/activation_kernel.cc
@@ -45,12 +45,12 @@ using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
 
 PD_REGISTER_KERNEL(
-    hard_swish, CPU, ALL_LAYOUT, phi::HardSwishKernel, float, double) {}
+    hardswish, CPU, ALL_LAYOUT, phi::HardSwishKernel, float, double) {}
 PD_REGISTER_KERNEL(relu6, CPU, ALL_LAYOUT, phi::Relu6Kernel, float, double) {}
 PD_REGISTER_KERNEL(swish, CPU, ALL_LAYOUT, phi::SwishKernel, float, double) {}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PD_REGISTER_KERNEL(hard_swish,
+PD_REGISTER_KERNEL(hardswish,
                    GPU,
                    ALL_LAYOUT,
                    phi::HardSwishKernel,
@@ -80,13 +80,13 @@ PD_REGISTER_KERNEL(swish,
 #endif
 
 #if defined PADDLE_WITH_XPU
-PD_REGISTER_KERNEL(hard_swish, XPU, ALL_LAYOUT, phi::HardSwishKernel, float) {}
+PD_REGISTER_KERNEL(hardswish, XPU, ALL_LAYOUT, phi::HardSwishKernel, float) {}
 PD_REGISTER_KERNEL(relu6, XPU, ALL_LAYOUT, phi::Relu6Kernel, float) {}
 PD_REGISTER_KERNEL(swish, XPU, ALL_LAYOUT, phi::SwishKernel, float) {}
 #endif
 
 #ifdef PADDLE_WITH_MKLDNN
-PD_REGISTER_KERNEL(hard_swish,
+PD_REGISTER_KERNEL(hardswish,
                    OneDNN,
                    ONEDNN,
                    phi::HardSwishKernel,
diff --git a/paddle/phi/kernels/cpu/activation_grad_kernel.cc b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
index 06485e847d..128336d6a5 100644
--- a/paddle/phi/kernels/cpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
@@ -263,7 +263,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(asinh_grad, AsinhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(acosh_grad, AcoshGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(atanh_grad, AtanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_grad, TanhGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_tanh_grad, HardTanhGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardtanh_grad, HardTanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(thresholded_relu_grad,
                                    ThresholdedReluGradKernel)
@@ -388,7 +388,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(log2_grad, Log2GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log10_grad, Log10GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log1p_grad, Log1pGradKernel)
 PD_REGISTER_ACTIVATION_DOUBLE_GRAD_KERNEL(log_double_grad, LogDoubleGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(swish_grad, SwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(round_grad, RoundGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(floor_grad, FloorGradKernel)
diff --git a/paddle/phi/kernels/cpu/activation_kernel.cc b/paddle/phi/kernels/cpu/activation_kernel.cc
index f3905c1f80..70b011eafe 100644
--- a/paddle/phi/kernels/cpu/activation_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_kernel.cc
@@ -146,7 +146,7 @@ PD_REGISTER_ACTIVATION_KERNEL(asinh, AsinhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(acosh, AcoshKernel)
 PD_REGISTER_ACTIVATION_KERNEL(atanh, AtanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_tanh, HardTanhKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardtanh, HardTanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
@@ -183,7 +183,7 @@ PD_REGISTER_ACTIVATION_KERNEL(log2, Log2Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log10, Log10Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log1p, Log1pKernel)
 PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
 PD_REGISTER_ACTIVATION_KERNEL(ceil, CeilKernel)
diff --git a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
index 5c7e79c9e8..61d20ac32f 100644
--- a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
@@ -180,7 +180,7 @@ void ArgMaxKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(arg_min,
+PD_REGISTER_KERNEL(argmin,
                    CPU,
                    ALL_LAYOUT,
                    phi::ArgMinKernel,
@@ -191,7 +191,7 @@ PD_REGISTER_KERNEL(arg_min,
                    int16_t,
                    uint8_t) {}
 
-PD_REGISTER_KERNEL(arg_max,
+PD_REGISTER_KERNEL(argmax,
                    CPU,
                    ALL_LAYOUT,
                    phi::ArgMaxKernel,
diff --git a/paddle/phi/kernels/gpu/activation_grad_kernel.cu b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
index 2c2ca16e26..0c8c8b43a0 100644
--- a/paddle/phi/kernels/gpu/activation_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
@@ -347,7 +347,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(atanh_grad, AtanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_grad, TanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_double_grad, TanhDoubleGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(tanh_triple_grad, TanhTripleGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_tanh_grad, HardTanhGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardtanh_grad, HardTanhGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_double_grad,
                                    LeakyReluDoubleGradKernel)
@@ -474,7 +474,7 @@ PD_REGISTER_KERNEL(log_double_grad,
                    float,
                    double,
                    phi::dtype::float16) {}
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(swish_grad, SwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(round_grad, RoundGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(floor_grad, FloorGradKernel)
diff --git a/paddle/phi/kernels/gpu/activation_kernel.cu b/paddle/phi/kernels/gpu/activation_kernel.cu
index 5168a1de07..271ad6107b 100644
--- a/paddle/phi/kernels/gpu/activation_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_kernel.cu
@@ -196,7 +196,7 @@ PD_REGISTER_ACTIVATION_KERNEL(asinh, AsinhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(acosh, AcoshKernel)
 PD_REGISTER_ACTIVATION_KERNEL(atanh, AtanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_tanh, HardTanhKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardtanh, HardTanhKernel)
 PD_REGISTER_ACTIVATION_KERNEL(thresholded_relu, ThresholdedReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu6_raw, Relu6RawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
@@ -254,7 +254,7 @@ PD_REGISTER_ACTIVATION_KERNEL(log, LogKernel)
 PD_REGISTER_ACTIVATION_KERNEL(log2, Log2Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log10, Log10Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(log1p, Log1pKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(swish_raw, SwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
diff --git a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
index 74be557c7d..affd36a95e 100644
--- a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
+++ b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
@@ -248,7 +248,7 @@ void ArgMaxKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(arg_min,
+PD_REGISTER_KERNEL(argmin,
                    GPU,
                    ALL_LAYOUT,
                    phi::ArgMinKernel,
@@ -261,7 +261,7 @@ PD_REGISTER_KERNEL(arg_min,
                    int16_t,
                    uint8_t) {}
 
-PD_REGISTER_KERNEL(arg_max,
+PD_REGISTER_KERNEL(argmax,
                    GPU,
                    ALL_LAYOUT,
                    phi::ArgMaxKernel,
diff --git a/paddle/phi/kernels/onednn/activation_grad_kernel.cc b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
index 1ebe9f20c6..489f53da76 100644
--- a/paddle/phi/kernels/onednn/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
@@ -273,7 +273,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(abs_grad, AbsGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_grad, EluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(gelu_grad, GeluGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(mish_grad, MishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(relu6_grad, Relu6GradKernel)
diff --git a/paddle/phi/kernels/onednn/activation_kernel.cc b/paddle/phi/kernels/onednn/activation_kernel.cc
index c6367c826c..75bc135563 100644
--- a/paddle/phi/kernels/onednn/activation_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_kernel.cc
@@ -202,7 +202,7 @@ PD_REGISTER_ACTIVATION_KERNEL(abs, AbsKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)
 PD_REGISTER_ACTIVATION_KERNEL(gelu, GeluKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(relu, ReluKernel)
diff --git a/paddle/phi/kernels/xpu/activation_grad_kernel.cc b/paddle/phi/kernels/xpu/activation_grad_kernel.cc
index e3b5e1bfcd..9585e2264d 100644
--- a/paddle/phi/kernels/xpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/activation_grad_kernel.cc
@@ -617,7 +617,7 @@ PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(log_grad, LogGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(leaky_relu_grad, LeakyReluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_sigmoid_grad, HardSigmoidGradKernel)
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(hardswish_grad, HardSwishGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(reciprocal_grad, ReciprocalGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(relu6_grad, Relu6GradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(sigmoid_grad, SigmoidGradKernel)
diff --git a/paddle/phi/kernels/xpu/activation_kernel.cc b/paddle/phi/kernels/xpu/activation_kernel.cc
index 2425f304a3..0d41afeeac 100644
--- a/paddle/phi/kernels/xpu/activation_kernel.cc
+++ b/paddle/phi/kernels/xpu/activation_kernel.cc
@@ -486,7 +486,7 @@ PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)  // no grad
 PD_REGISTER_ACTIVATION_KERNEL(log, LogKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hard_sigmoid, HardSigmoidKernel)
-PD_REGISTER_ACTIVATION_KERNEL(hard_swish_raw, HardSwishRawKernel)
+PD_REGISTER_ACTIVATION_KERNEL(hardswish_raw, HardSwishRawKernel)
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(pow, PowKernel)
 PD_REGISTER_ACTIVATION_KERNEL(reciprocal, ReciprocalKernel)
diff --git a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
index b3a7393172..3513b64bc6 100644
--- a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
@@ -65,4 +65,4 @@ void ArgMaxKernel(const Context& dev_ctx,
                        XPUAPIErrorMsg[r]));
 }
 }  // namespace phi
-PD_REGISTER_KERNEL(arg_max, XPU, ALL_LAYOUT, phi::ArgMaxKernel, float) {}
+PD_REGISTER_KERNEL(argmax, XPU, ALL_LAYOUT, phi::ArgMaxKernel, float) {}
diff --git a/paddle/phi/ops/compat/activation_sig.cc b/paddle/phi/ops/compat/activation_sig.cc
index da61faf543..e40bb6bc3d 100644
--- a/paddle/phi/ops/compat/activation_sig.cc
+++ b/paddle/phi/ops/compat/activation_sig.cc
@@ -39,10 +39,10 @@ namespace phi {
 
 #define comma ,
 
-DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardTanh, "hard_tanh", "t_min" comma "t_max");
+DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardTanh, "hardtanh", "t_min" comma "t_max");
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Mish, "mish", "threshold");
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardSwish,
-                               "hard_swish",
+                               "hardswish",
                                "threshold" comma "scale" comma
                                    "offset");  // NOLINT
 DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Swish, "swish", "beta");  // NOLINT
@@ -55,7 +55,7 @@ DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Relu6, "relu6", "threshold");  // NOLINT
 
 KernelSignature HardSwishOpArgumentMapping(const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "hard_swish_raw", {"X"}, {"threshold", "scale", "offset"}, {"Out"});
+      "hardswish_raw", {"X"}, {"threshold", "scale", "offset"}, {"Out"});
 }
 
 KernelSignature SwishOpArgumentMapping(const ArgumentMappingContext& ctx) {
@@ -113,8 +113,10 @@ KernelSignature PowTripleGradOpArgumentMapping(
 }
 }  // namespace phi
 
-PD_REGISTER_BASE_KERNEL_NAME(brelu, hard_tanh);
-PD_REGISTER_BASE_KERNEL_NAME(brelu_grad, hard_tanh_grad);
+PD_REGISTER_BASE_KERNEL_NAME(brelu, hardtanh);
+PD_REGISTER_BASE_KERNEL_NAME(brelu_grad, hardtanh_grad);
+PD_REGISTER_BASE_KERNEL_NAME(hard_swish, hardswish);
+PD_REGISTER_BASE_KERNEL_NAME(hard_swish_grad, hardswish_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(mish_grad, phi::MishGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(stanh_grad, phi::STanhGradOpArgumentMapping);
diff --git a/paddle/phi/ops/compat/arg_min_max_sig.cc b/paddle/phi/ops/compat/arg_min_max_sig.cc
new file mode 100644
index 0000000000..4739165c82
--- /dev/null
+++ b/paddle/phi/ops/compat/arg_min_max_sig.cc
@@ -0,0 +1,18 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+PD_REGISTER_BASE_KERNEL_NAME(arg_max, argmax);
+PD_REGISTER_BASE_KERNEL_NAME(arg_min, argmin);
-- 
GitLab
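
The essence of the change above: TransToFluidOpName() previously answered the reverse question (phi kernel name -> fluid op name) by scanning the forward map with std::find_if on every call. The patch maintains a second hash map in the opposite direction, populated at registration time by BaseKernelNameRegistrar, so the reverse lookup becomes a single find(). Below is a minimal standalone sketch of that scheme; the class and member names are hypothetical, std::unordered_map stands in for paddle::flat_hash_map, and this is not Paddle's actual API.

#include <iostream>
#include <string>
#include <unordered_map>

// Bidirectional op-name/kernel-name registry: one map per direction, both
// filled by a single Insert(), mirroring how BaseKernelNameRegistrar now
// calls InsertBaseKernelName() and InsertFluidOplName() together.
class NameMap {
 public:
  void Insert(const std::string& fluid_op, const std::string& phi_kernel) {
    fluid_op_to_phi_kernel_[fluid_op] = phi_kernel;
    phi_kernel_to_fluid_op_[phi_kernel] = fluid_op;
  }

  // Forward lookup: fluid op name -> phi kernel name. Falls back to the
  // queried name when no mapping is registered.
  const std::string& ToPhiKernel(const std::string& fluid_op) const {
    auto it = fluid_op_to_phi_kernel_.find(fluid_op);
    return it == fluid_op_to_phi_kernel_.end() ? fluid_op : it->second;
  }

  // Reverse lookup: phi kernel name -> fluid op name. A hash find()
  // instead of the old linear std::find_if scan over the forward map.
  const std::string& ToFluidOp(const std::string& phi_kernel) const {
    auto it = phi_kernel_to_fluid_op_.find(phi_kernel);
    return it == phi_kernel_to_fluid_op_.end() ? phi_kernel : it->second;
  }

 private:
  std::unordered_map<std::string, std::string> fluid_op_to_phi_kernel_;
  std::unordered_map<std::string, std::string> phi_kernel_to_fluid_op_;
};

int main() {
  NameMap names;
  names.Insert("hard_swish", "hardswish");  // renames made by this patch
  names.Insert("arg_max", "argmax");
  std::cout << names.ToPhiKernel("hard_swish") << "\n";  // hardswish
  std::cout << names.ToFluidOp("argmax") << "\n";        // arg_max
  std::cout << names.ToFluidOp("relu") << "\n";          // relu (no mapping)
  return 0;
}

The fall-back-to-input behavior is what lets operators whose fluid and phi names already agree skip registration entirely; only renamed kernels (hardswish, hardtanh, argmax, argmin, ...) need a PD_REGISTER_BASE_KERNEL_NAME entry.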