From 7d138402a57d27cd12c75f92953702f87b07894b Mon Sep 17 00:00:00 2001
From: zhangyuqin1998 <75946871+zhangyuqin1998@users.noreply.github.com>
Date: Thu, 9 Mar 2023 10:13:46 +0800
Subject: [PATCH] delete axis of fmax (#51264)

---
 paddle/phi/api/yaml/legacy_backward.yaml      |  2 +-
 paddle/phi/kernels/cpu/elementwise_kernel.cc  | 10 ++-------
 paddle/phi/kernels/elementwise_grad_kernel.h  |  1 -
 paddle/phi/kernels/elementwise_kernel.cc      | 21 -------------------
 paddle/phi/kernels/elementwise_kernel.h       |  7 -------
 .../impl/elementwise_grad_kernel_impl.h       |  2 +-
 .../kernels/impl/elementwise_kernel_impl.h    | 11 +++++-----
 paddle/phi/kernels/kps/elementwise_kernel.cu  |  4 ++--
 paddle/phi/ops/compat/elementwise_sig.cc      |  4 ++--
 9 files changed, 13 insertions(+), 49 deletions(-)

diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 9129bc803c0..4d98226120d 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -499,7 +499,7 @@
 
 - backward_op : fmax_grad
   forward : fmax(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
+  args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
     func : GeneralBinaryGradInferMeta
diff --git a/paddle/phi/kernels/cpu/elementwise_kernel.cc b/paddle/phi/kernels/cpu/elementwise_kernel.cc
index 1e0bd41d51a..545e0ddd6bd 100644
--- a/paddle/phi/kernels/cpu/elementwise_kernel.cc
+++ b/paddle/phi/kernels/cpu/elementwise_kernel.cc
@@ -121,14 +121,8 @@ using complex128 = ::phi::dtype::complex<double>;
 // NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
 // using bfloat16 = ::phi::dtype::bfloat16;
 
-PD_REGISTER_KERNEL(fmax_raw,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::FMaxRawKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
+PD_REGISTER_KERNEL(
+    fmax, CPU, ALL_LAYOUT, phi::FMaxKernel, float, double, int, int64_t) {}
 
 PD_REGISTER_KERNEL(
     fmin, CPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}
diff --git a/paddle/phi/kernels/elementwise_grad_kernel.h b/paddle/phi/kernels/elementwise_grad_kernel.h
index 03773bfb336..db6d937f7ea 100644
--- a/paddle/phi/kernels/elementwise_grad_kernel.h
+++ b/paddle/phi/kernels/elementwise_grad_kernel.h
@@ -24,7 +24,6 @@ void ElementwiseFMaxGradKernel(const Context& dev_ctx,
                                const DenseTensor& x,
                                const DenseTensor& y,
                                const DenseTensor& out_grad,
-                               int axis,
                                DenseTensor* x_grad,
                                DenseTensor* y_grad);
 
diff --git a/paddle/phi/kernels/elementwise_kernel.cc b/paddle/phi/kernels/elementwise_kernel.cc
index b2ac152e3a1..7cefbfd4145 100644
--- a/paddle/phi/kernels/elementwise_kernel.cc
+++ b/paddle/phi/kernels/elementwise_kernel.cc
@@ -101,21 +101,10 @@ void SubtractKernel(const Context& dev_ctx,
   SubtractRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
-template <typename T, typename Context>
-void FMaxKernel(const Context& dev_ctx,
-                const DenseTensor& x,
-                const DenseTensor& y,
-                DenseTensor* out) {
-  FMaxRawKernel<T>(dev_ctx, x, y, -1, out);
-}
-
 }  // namespace phi
 
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
 
-PD_REGISTER_KERNEL(
-    fmax, CPU, ALL_LAYOUT, phi::FMaxKernel, float, double, int, int64_t) {}
-
 PD_REGISTER_KERNEL(maximum,
                    CPU,
                    ALL_LAYOUT,
@@ -204,16 +193,6 @@ PD_REGISTER_KERNEL(divide,
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 
-PD_REGISTER_KERNEL(fmax,
-                   KPS,
-                   ALL_LAYOUT,
-                   phi::FMaxKernel,
-                   float,
-                   double,
-                   int,
-                   phi::dtype::float16,
-                   int64_t) {}
-
 PD_REGISTER_KERNEL(maximum,
                    KPS,
                    ALL_LAYOUT,
diff --git a/paddle/phi/kernels/elementwise_kernel.h b/paddle/phi/kernels/elementwise_kernel.h
index 1e4d99b6bd4..3bc4163d59e 100644
--- a/paddle/phi/kernels/elementwise_kernel.h
+++ b/paddle/phi/kernels/elementwise_kernel.h
@@ -19,13 +19,6 @@
 
 namespace phi {
 
-template <typename T, typename Context>
-void FMaxRawKernel(const Context& dev_ctx,
-                   const DenseTensor& x,
-                   const DenseTensor& y,
-                   int axis,
-                   DenseTensor* out);
-
 template <typename T, typename Context>
 void FMaxKernel(const Context& dev_ctx,
                 const DenseTensor& x,
diff --git a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
index c440561b365..4daca4d72e9 100644
--- a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -265,7 +265,6 @@ void ElementwiseFMaxGradKernel(const Context& dev_ctx,
                                const DenseTensor& x,
                                const DenseTensor& y,
                                const DenseTensor& out_grad,
-                               int axis,
                                DenseTensor* x_grad,
                                DenseTensor* y_grad) {
   funcs::ElementwiseGradPreProcess(out_grad, x_grad);
@@ -273,6 +272,7 @@ void ElementwiseFMaxGradKernel(const Context& dev_ctx,
   auto out = out_grad;  // Fake out, not used
   auto x_dim = x.dims();
   auto y_dim = y.dims();
+  int axis = -1;
   if (x.dims() == y.dims()) {
     funcs::ElemwiseGradComputeNoBroadcast<Context,
diff --git a/paddle/phi/kernels/impl/elementwise_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_kernel_impl.h
--- a/paddle/phi/kernels/impl/elementwise_kernel_impl.h
+++ b/paddle/phi/kernels/impl/elementwise_kernel_impl.h
 template <typename T, typename Context>
-void FMaxRawKernel(const Context& dev_ctx,
-                   const DenseTensor& x,
-                   const DenseTensor& y,
-                   int axis,
-                   DenseTensor* out) {
+void FMaxKernel(const Context& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y,
+                DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   funcs::ElementwiseCompute<funcs::FMaxFunctor<T>, T, T>(
-      dev_ctx, x, y, axis, funcs::FMaxFunctor<T>(), out);
+      dev_ctx, x, y, -1, funcs::FMaxFunctor<T>(), out);
 }
 
 template <typename T, typename Context>
diff --git a/paddle/phi/kernels/kps/elementwise_kernel.cu b/paddle/phi/kernels/kps/elementwise_kernel.cu
index f90b57d235b..2d1aaec2ee1 100644
--- a/paddle/phi/kernels/kps/elementwise_kernel.cu
+++ b/paddle/phi/kernels/kps/elementwise_kernel.cu
@@ -109,10 +109,10 @@ using bfloat16 = phi::dtype::bfloat16;
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;
 
-PD_REGISTER_KERNEL(fmax_raw,
+PD_REGISTER_KERNEL(fmax,
                    KPS,
                    ALL_LAYOUT,
-                   phi::FMaxRawKernel,
+                   phi::FMaxKernel,
                    float,
                    double,
                    int,
diff --git a/paddle/phi/ops/compat/elementwise_sig.cc b/paddle/phi/ops/compat/elementwise_sig.cc
index 044bf52bb65..2667e0e1201 100644
--- a/paddle/phi/ops/compat/elementwise_sig.cc
+++ b/paddle/phi/ops/compat/elementwise_sig.cc
@@ -176,7 +176,7 @@ KernelSignature ElementwiseMulGradOpArgumentMapping(
 
 KernelSignature ElementwiseFMaxOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("fmax_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  return KernelSignature("fmax", {"X", "Y"}, {}, {"Out"});
 }
 
 KernelSignature ElementwiseFMinOpArgumentMapping(
@@ -187,7 +187,7 @@ KernelSignature ElementwiseFMinOpArgumentMapping(
 KernelSignature ElementwiseFMaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "fmax_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"});
+      "fmax_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
 KernelSignature ElementwiseMulDoubleGradOpArgumentMapping(
-- 
GitLab
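Note on the user-facing behavior this patch preserves: the Python `paddle.fmax` API never exposed an `axis` argument, so dropping the attribute from the kernel signature (and folding `fmax_raw` into `fmax`) should change nothing for callers; broadcasting simply follows the default trailing-axis rule that the kernel now hardcodes as `axis = -1`. A minimal sketch, assuming a Paddle build that includes this change; the sample tensor values are illustrative only:

    import paddle

    x = paddle.to_tensor([[1.0, 8.0, 3.0],
                          [4.0, 5.0, 6.0]])
    y = paddle.to_tensor([7.0, 2.0, 9.0])  # broadcast against x's trailing dim

    # No axis argument anywhere: y is aligned to x's last dimension,
    # exactly what the hardcoded axis = -1 in the kernel implements.
    out = paddle.fmax(x, y)
    print(out)  # [[7., 8., 9.], [7., 5., 9.]]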