diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
index 0da6e495dc506ae44eff091c2570360c1e3bc697..a945456a226ef01e02c25d9f9f40b9a5de293f56 100644
--- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
@@ -86,15 +86,8 @@ class ElementwiseMaxCompositeGradOpMaker
     paddle::Tensor dy = this->GetSingleInputGrad("Y");
     auto* dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
-    int axis = static_cast<int>(this->Attr<int>("axis"));
-    PADDLE_ENFORCE_EQ(
-        axis,
-        -1,
-        phi::errors::InvalidArgument(
-            "We only support axis = -1 in composite maximum_grad but we got: ",
-            axis));
     VLOG(6) << "Runing maximum_grad composite func";
-    prim::maximum_grad<prim::DescTensor>(x, y, out_grad, axis, dx_ptr, dy_ptr);
+    prim::maximum_grad<prim::DescTensor>(x, y, out_grad, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
     this->RecoverOutputName(dy, dy_name);
   }
diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
index b6389b97476683c3fb990fd08b258cb438a974c7..d2c047f0e4bbd85e44e161a01de44f3e35bf08c7 100644
--- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
@@ -60,15 +60,8 @@ class ElementwisePowCompositeGradOpMaker
     paddle::Tensor dy = this->GetSingleInputGrad("Y");
     auto dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
-    int axis = static_cast<int>(this->Attr<int>("axis"));
-    PADDLE_ENFORCE_EQ(
-        axis,
-        -1,
-        phi::errors::InvalidArgument(
-            "We only support axis = -1 in composite pow but we got: ", axis));
-    VLOG(6) << "Runing pow_grad composite func";
     prim::elementwise_pow_grad<prim::DescTensor>(
-        x, y, out_grad, axis, dx_ptr, dy_ptr);
+        x, y, out_grad, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
     this->RecoverOutputName(dy, dy_name);
   }
diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index ef2a4ffcd6496f6bd9356d6f883ba04d9b3fbb0a..e5725b189fdb61cda442a057e9fe1d0175ba90ba 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -391,7 +391,6 @@ template <typename T>
 void elementwise_pow_grad(const Tensor& x,
                           const Tensor& y,
                           const Tensor& out_grad,
-                          int axis,
                           Tensor* dx,
                           Tensor* dy) {
   if (dy) {
@@ -1380,7 +1379,6 @@ template <typename T>
 void maximum_grad(const Tensor& x,
                   const Tensor& y,
                   const Tensor& out_grad,
-                  int axis,
                   Tensor* x_grad,
                   Tensor* y_grad) {
   if (x_grad) {
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index dddd68d873d89037bcbbe304181d854cc8218811..0674270a272515d1cdfed70a99a3774ce909c834 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -332,7 +332,7 @@
 
 - backward_op : elementwise_pow_grad
   forward : elementwise_pow(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
+  args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
     func : GeneralBinaryGradInferMeta
@@ -577,7 +577,7 @@
 
 - backward_op : maximum_grad
   forward : maximum(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
+  args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
     func : GeneralBinaryGradInferMeta
@@ -616,7 +616,7 @@
 
 - backward_op : minimum_grad
   forward : minimum(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
+  args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :
     func : GeneralBinaryGradInferMeta
diff --git a/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc b/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
index 6b7f1aa3ee61dead5ac1e4128cbd5cfa5a6597f8..dfcea53207c22d25cf6ad93b8549d2fae009e808 100644
--- a/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -28,10 +28,10 @@ void MaximumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   funcs::ElementwiseGradPreProcess(dout, dx);
+  int axis = -1;
   phi::funcs::ElemwiseGradCompute<Context, T, MaxGradDx<T>, MaxGradDy<T>>(
       dev_ctx, x, y, dout, dout, axis, dx, dy, MaxGradDx<T>(), MaxGradDy<T>());
 }
@@ -41,10 +41,10 @@ void MinimumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   funcs::ElementwiseGradPreProcess(dout, dx);
+  int axis = -1;
   phi::funcs::ElemwiseGradCompute<Context, T, MinGradDx<T>, MinGradDy<T>>(
       dev_ctx, x, y, dout, dout, axis, dx, dy, MinGradDx<T>(), MinGradDy<T>());
 }
diff --git a/paddle/phi/kernels/elementwise_grad_kernel.h b/paddle/phi/kernels/elementwise_grad_kernel.h
index db6d937f7ea54f0487d528ed48478582a962539b..32137e600d1fa3a5bb41e7784dee650cbca4258e 100644
--- a/paddle/phi/kernels/elementwise_grad_kernel.h
+++ b/paddle/phi/kernels/elementwise_grad_kernel.h
@@ -40,7 +40,6 @@ void MaximumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy);
 
@@ -49,7 +48,6 @@ void MinimumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy);
 
@@ -66,7 +64,6 @@ void ElementwisePowGradKernel(const Context& dev_ctx,
                               const DenseTensor& x,
                               const DenseTensor& y,
                               const DenseTensor& dout,
-                              int axis,
                               DenseTensor* dx,
                               DenseTensor* dy);
 }  // namespace phi
diff --git a/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu b/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
index 4ff1164e64e082fc4549ed59b81a1820d33a8a4a..b3ad0dacae37c94e6a536410a02571b104ea2846 100644
--- a/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
@@ -31,11 +31,10 @@ void MaximumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   const auto place = dev_ctx.GetPlace();
-
+  int axis = -1;
   if (dx != nullptr && dy != nullptr) {
     std::vector<const DenseTensor*> ins = {&x, &y, &dout};
     GetGradXAndYOut<ElementwiseType::kTernary, T>(
@@ -63,10 +62,10 @@ void MinimumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   const auto place = dev_ctx.GetPlace();
+  int axis = -1;
   if (dx != nullptr && dy != nullptr) {
     std::vector<const DenseTensor*> ins = {&x, &y, &dout};
     GetGradXAndYOut<ElementwiseType::kTernary, T>(
diff --git a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
index 6aee9383da3c1d6bd6bd06afc4e9ee037179ef3d..1d069775f225dd9aa2f0059cd5050b0717fb05c3 100644
--- a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -958,10 +958,10 @@ void ElementwisePowGradKernel(const Context& dev_ctx,
                               const DenseTensor& x,
                               const DenseTensor& y,
                               const DenseTensor& dout,
-                              int axis,
                               DenseTensor* dx,
                               DenseTensor* dy) {
   funcs::ElementwiseGradPreProcess(dout, dx);
+  int axis = -1;
   phi::funcs::ElemwiseGradCompute<Context, T, PowGradDX<T>, PowGradDY<T>>(
       dev_ctx, x, y, dout, dout, axis, dx, dy, PowGradDX<T>(), PowGradDY<T>());
 }
diff --git a/paddle/phi/kernels/xpu/elementwise_grad_kernel.cc b/paddle/phi/kernels/xpu/elementwise_grad_kernel.cc
index 47da6b25de92017ae6aa817c4bdc547004447809..49b74480ded797de5dcea535ac0ca21315c69316 100644
--- a/paddle/phi/kernels/xpu/elementwise_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/elementwise_grad_kernel.cc
@@ -25,11 +25,10 @@ void MaximumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-
+  int axis = -1;
   auto f = [](xpu::Context* ctx,
               const XPUType* x,
               const XPUType* y,
@@ -51,11 +50,10 @@ void MinimumGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& dout,
-                       int axis,
                        DenseTensor* dx,
                        DenseTensor* dy) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-
+  int axis = -1;
   auto f = [](xpu::Context* ctx,
               const XPUType* x,
               const XPUType* y,
diff --git a/paddle/phi/ops/compat/elementwise_sig.cc b/paddle/phi/ops/compat/elementwise_sig.cc
index 2667e0e12013c44ce3c8bba4865ae5d71193f2a4..1906b0a220bb7963325ead0311d2ca723942b71d 100644
--- a/paddle/phi/ops/compat/elementwise_sig.cc
+++ b/paddle/phi/ops/compat/elementwise_sig.cc
@@ -210,13 +210,13 @@ KernelSignature ElementwiseMulTripleGradOpArgumentMapping(
 KernelSignature ElementwiseMaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "maximum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"});
+      "maximum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
 KernelSignature ElementwiseMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "minimum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"});
+      "minimum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
 KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
@@ -227,10 +227,8 @@ KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
 
 KernelSignature ElementwisePowGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("elementwise_pow_grad",
-                         {"X", "Y", "Out@GRAD"},
-                         {"axis"},
-                         {"X@GRAD", "Y@GRAD"});
+  return KernelSignature(
+      "elementwise_pow_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
 }  // namespace phi