From 67ffb86e6fdfcd1366537759bf464b343f0bf76e Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Tue, 22 Mar 2022 10:42:13 +0800
Subject: [PATCH] [Phi]Modify reduce arg order (#40706)

* modify out and out_grad order in reduce_grad_kernel

* delete unused BoolReduceKernel

* fix conflict
---
 paddle/fluid/operators/reduce_ops/reduce_op.h | 61 -------------------
 paddle/phi/kernels/cpu/reduce_grad_kernel.cc  |  4 +-
 .../impl/frobenius_norm_grad_kernel_impl.h    |  2 +-
 paddle/phi/kernels/impl/reduce_grad.h         |  2 +-
 .../impl/reduce_max_grad_kernel_impl.h        |  4 +-
 .../impl/reduce_min_grad_kernel_impl.h        |  4 +-
 .../impl/reduce_prod_grad_kernel_impl.h       |  4 +-
 paddle/phi/kernels/reduce_grad_kernel.h       |  6 +-
 paddle/phi/ops/compat/reduce_sig.cc           |  6 +-
 9 files changed, 16 insertions(+), 77 deletions(-)

diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h
index 65cca94814e..ff1ddb4175f 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -265,67 +265,6 @@ class ReduceKernel : public framework::OpKernel<T> {
                            framework::TransToPhiDataType(cast_out_dtype),
                            output);
   }
 };
-
-template <typename DeviceContext, typename OutT, typename Functor>
-class BoolReduceKernel : public framework::OpKernel<OutT> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    bool reduce_all = context.Attr<bool>("reduce_all");
-    auto* input = context.Input<Tensor>("X");
-    auto* output = context.Output<Tensor>("Out");
-    output->mutable_data<OutT>(context.GetPlace());
-
-    auto dims = context.Attr<std::vector<int>>("dim");
-    bool keep_dim = context.Attr<bool>("keep_dim");
-
-    // The dims has full dim, set the reduce_all is True
-    const auto& input_dim_size = context.Input<Tensor>("X")->dims().size();
-    std::set<int> dims_set(dims.begin(), dims.end());
-    bool full_dim = true;
-    for (auto i = 0; i < input_dim_size; i++) {
-      if (dims_set.find(i) == dims_set.end()) {
-        full_dim = false;
-        break;
-      }
-    }
-    reduce_all = (reduce_all || full_dim);
-
-    if (reduce_all) {
-      // Flatten and reduce 1-D tensor
-      auto x = EigenVector<OutT>::Flatten(*input);
-      auto out = EigenScalar<OutT>::From(*output);
-      auto& place =
-          *context.template device_context<DeviceContext>().eigen_device();
-      auto reduce_dim = Eigen::array<int, 1>({{0}});
-      Functor functor;
-      functor(place, &x, &out, reduce_dim);
-    } else {
-      int ndim = input->dims().size();
-      int rdim = dims.size();
-      // comments for accelerating compiling temporarily.
-      if (ndim > 6) {
-        HandleLargeDim<DeviceContext, OutT, Functor>(context, input, output,
-                                                     dims, keep_dim);
-      } else {
-        HANDLE_DIM(6, 5);
-        HANDLE_DIM(6, 4);
-        HANDLE_DIM(6, 3);
-        HANDLE_DIM(6, 2);
-        HANDLE_DIM(6, 1);
-        HANDLE_DIM(5, 4);
-        HANDLE_DIM(5, 3);
-        HANDLE_DIM(5, 2);
-        HANDLE_DIM(5, 1);
-        HANDLE_DIM(4, 3);
-        HANDLE_DIM(4, 2);
-        HANDLE_DIM(4, 1);
-        HANDLE_DIM(3, 2);
-        HANDLE_DIM(3, 1);
-        HANDLE_DIM(2, 1);
-        HANDLE_DIM(1, 1);
-      }
-    }
-  }
-};
 
 template <typename DeviceContext, typename T, typename Functor,
           bool kNoNeedBufferX = false, bool kNoNeedBufferY = false>
 void LaunchReduceGradKernel(const framework::ExecutionContext& context,
diff --git a/paddle/phi/kernels/cpu/reduce_grad_kernel.cc b/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
index 78a7ae8d415..4b3b1fc16e9 100644
--- a/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
@@ -99,8 +99,8 @@ void ReduceSumGradKernel(const Context& dev_ctx,
   ReduceGradKernel<Context, T, funcs::SumGradFunctor, true>(dev_ctx,
                                                             x,
-                                                            out_grad,
                                                             paddle::none,
+                                                            out_grad,
                                                             dims,
                                                             keep_dim,
                                                             reduce_all,
@@ -121,8 +121,8 @@ void ReduceMeanGradKernel(const Context& dev_ctx,
                           DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MeanGradFunctor, true>(dev_ctx,
                                                              x,
-                                                             out_grad,
                                                              paddle::none,
+                                                             out_grad,
                                                              dims,
                                                              keep_dim,
                                                              reduce_all,
diff --git a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
index 65d903a7fe4..1877a4ecc22 100644
--- a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
@@ -33,7 +33,7 @@ void FrobeniusNormGradKernel(const Context& ctx,
                              DataType out_dtype,
                              DenseTensor* dx) {
   ReduceGradKernel<Context, T, funcs::FrobeniusNormGradFunctor>(
-      ctx, x, dout, out, axis, keep_dim, reduce_all, in_dtype, out_dtype, dx);
+      ctx, x, out, dout, axis, keep_dim, reduce_all, in_dtype, out_dtype, dx);
 }
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/impl/reduce_grad.h b/paddle/phi/kernels/impl/reduce_grad.h
index f56d3d3ed50..0b1c43b5f04 100644
--- a/paddle/phi/kernels/impl/reduce_grad.h
+++ b/paddle/phi/kernels/impl/reduce_grad.h
@@ -87,8 +87,8 @@ template <typename Context, typename T, typename Functor, bool kNoNeedBufferX = false>
 void ReduceGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
-                      const DenseTensor& out_grad,
                       const paddle::optional<const DenseTensor&>& out,
+                      const DenseTensor& out_grad,
                       const std::vector<int64_t>& dims,
                       bool keep_dim,
                       bool reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
index 4a74416e391..0a0c1abac80 100644
--- a/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceMaxGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceMaxGradKernel(const Context& dev_ctx,
                          DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MaxOrMinGradFunctor>(dev_ctx,
                                                            x,
-                                                           out_grad,
                                                            out,
+                                                           out_grad,
                                                            dims,
                                                            keep_dim,
                                                            reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
index baaa544f137..965fc686e27 100644
--- a/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceMinGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceMinGradKernel(const Context& dev_ctx,
                          DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MaxOrMinGradFunctor>(dev_ctx,
                                                            x,
-                                                           out_grad,
                                                            out,
+                                                           out_grad,
                                                           dims,
                                                           keep_dim,
                                                           reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
index 6b93e98cec0..fb361e34205 100644
--- a/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
-                          const DenseTensor& out_grad,
                           const DenseTensor& out,
+                          const DenseTensor& out_grad,
                           const std::vector<int64_t>& dims,
                           bool keep_dim,
                           bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceProdGradKernel(const Context& dev_ctx,
                           DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::ProdGradFunctor>(dev_ctx,
                                                        x,
-                                                       out_grad,
                                                        out,
+                                                       out_grad,
                                                        dims,
                                                        keep_dim,
                                                        reduce_all,
diff --git a/paddle/phi/kernels/reduce_grad_kernel.h b/paddle/phi/kernels/reduce_grad_kernel.h
index ee6f3d19a09..a4b472c4458 100644
--- a/paddle/phi/kernels/reduce_grad_kernel.h
+++ b/paddle/phi/kernels/reduce_grad_kernel.h
@@ -43,8 +43,8 @@ void ReduceMeanGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
-                          const DenseTensor& out_grad,
                           const DenseTensor& out,
+                          const DenseTensor& out_grad,
                           const std::vector<int64_t>& dims,
                           bool keep_dim,
                           bool reduce_all,
@@ -55,8 +55,8 @@ void ReduceProdGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceMaxGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -67,8 +67,8 @@ void ReduceMaxGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceMinGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc
index 4bca0523801..273badee623 100644
--- a/paddle/phi/ops/compat/reduce_sig.cc
+++ b/paddle/phi/ops/compat/reduce_sig.cc
@@ -149,7 +149,7 @@ KernelSignature ReduceMaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "max_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
       {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
       {GradVarName("X")});
 }
@@ -158,7 +158,7 @@ KernelSignature ReduceMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "min_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
       {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
       {GradVarName("X")});
 }
@@ -167,7 +167,7 @@ KernelSignature ReduceProdGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "prod_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
      {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
      {GradVarName("X")});
 }
-- 
GitLab
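
Note (editorial, not part of the patch): the change above is purely an
argument-order change. Every reduce grad kernel now takes the forward output
`out` before its gradient `out_grad`, i.e. `(x, out, out_grad, dims, ...)`
instead of `(x, out_grad, out, dims, ...)`. The following is a minimal,
self-contained C++ sketch of what call sites look like after this patch;
`Tensor` and `ReduceMaxGrad` are toy stand-ins, not Paddle's API:

    #include <iostream>
    #include <string>

    using Tensor = std::string;  // toy stand-in for phi::DenseTensor

    // Mirrors the post-patch parameter order: x, out, out_grad.
    void ReduceMaxGrad(const Tensor& x, const Tensor& out,
                       const Tensor& out_grad) {
      std::cout << "x=" << x << ", out=" << out
                << ", out_grad=" << out_grad << "\n";
    }

    int main() {
      Tensor x = "X", out = "Out", out_grad = "Out@GRAD";
      // Pre-patch callers passed (x, out_grad, out). Because only the order
      // of two same-typed parameters changes, the compiler cannot flag a
      // stale call site, which is why every caller is updated in this patch.
      ReduceMaxGrad(x, out, out_grad);
      return 0;
    }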
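
Note (editorial, not part of the patch): the reduce_sig.cc hunks keep the
fluid-to-phi compat layer in sync. The input list in a KernelSignature is
positional, so `{"X", "Out", GradVarName("Out")}` must line up with the new
kernel parameter order `(x, out, out_grad)`. A toy sketch of that invariant;
the `GradVarName` here is a stand-in for Paddle's helper, which appends
"@GRAD":

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy stand-in for paddle::framework::GradVarName().
    std::string GradVarName(const std::string& name) { return name + "@GRAD"; }

    int main() {
      // Post-patch input mapping for max_grad/min_grad/prod_grad.
      std::vector<std::string> inputs = {"X", "Out", GradVarName("Out")};
      assert(inputs[1] == "Out");       // forward output  -> `out`
      assert(inputs[2] == "Out@GRAD");  // output gradient -> `out_grad`
      return 0;
    }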