diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h
index 65cca94814e88111239aef3559285d6fe321a72d..ff1ddb4175feff0b059c7ba43e77b306197a11a0 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -265,67 +265,6 @@ class ReduceKernel : public framework::OpKernel<T> {
         framework::TransToPhiDataType(cast_out_dtype), output);
   }
 };
-template <typename DeviceContext, typename OutT, typename Functor>
-class BoolReduceKernel : public framework::OpKernel<OutT> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    bool reduce_all = context.Attr<bool>("reduce_all");
-    auto* input = context.Input<Tensor>("X");
-    auto* output = context.Output<Tensor>("Out");
-    output->mutable_data<OutT>(context.GetPlace());
-
-    auto dims = context.Attr<std::vector<int>>("dim");
-    bool keep_dim = context.Attr<bool>("keep_dim");
-
-    // The dims has full dim, set the reduce_all is True
-    const auto& input_dim_size = context.Input<Tensor>("X")->dims().size();
-    std::set<int> dims_set(dims.begin(), dims.end());
-    bool full_dim = true;
-    for (auto i = 0; i < input_dim_size; i++) {
-      if (dims_set.find(i) == dims_set.end()) {
-        full_dim = false;
-        break;
-      }
-    }
-    reduce_all = (reduce_all || full_dim);
-
-    if (reduce_all) {
-      // Flatten and reduce 1-D tensor
-      auto x = EigenVector<OutT>::Flatten(*input);
-      auto out = EigenScalar<OutT>::From(*output);
-      auto& place =
-          *context.template device_context<DeviceContext>().eigen_device();
-      auto reduce_dim = Eigen::array<int, 1>({{0}});
-      Functor functor;
-      functor(place, &x, &out, reduce_dim);
-    } else {
-      int ndim = input->dims().size();
-      int rdim = dims.size();
-      // comments for accelerating compiling temporarily.
-      if (ndim > 6) {
-        HandleLargeDim<DeviceContext, OutT, Functor>(context, input, output,
-                                                     dims, keep_dim);
-      } else {
-        HANDLE_DIM(6, 5);
-        HANDLE_DIM(6, 4);
-        HANDLE_DIM(6, 3);
-        HANDLE_DIM(6, 2);
-        HANDLE_DIM(6, 1);
-        HANDLE_DIM(5, 4);
-        HANDLE_DIM(5, 3);
-        HANDLE_DIM(5, 2);
-        HANDLE_DIM(5, 1);
-        HANDLE_DIM(4, 3);
-        HANDLE_DIM(4, 2);
-        HANDLE_DIM(4, 1);
-        HANDLE_DIM(3, 2);
-        HANDLE_DIM(3, 1);
-        HANDLE_DIM(2, 1);
-        HANDLE_DIM(1, 1);
-      }
-    }
-  }
-};
 
 template <typename DeviceContext, typename T, typename Functor>
 void LaunchReduceGradKernel(const framework::ExecutionContext& context,
diff --git a/paddle/phi/kernels/cpu/reduce_grad_kernel.cc b/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
index 78a7ae8d415b5d4b18fdf8e469576db50f739e38..4b3b1fc16e9c426daccb6364f926a7c44ab63e59 100644
--- a/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/reduce_grad_kernel.cc
@@ -99,8 +99,8 @@ void ReduceSumGradKernel(const Context& dev_ctx,
 
   ReduceGradKernel<Context, T, funcs::SumGradFunctor, true>(dev_ctx,
                                                             x,
-                                                            out_grad,
                                                             paddle::none,
+                                                            out_grad,
                                                             dims,
                                                             keep_dim,
                                                             reduce_all,
@@ -121,8 +121,8 @@ void ReduceMeanGradKernel(const Context& dev_ctx,
                           DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MeanGradFunctor, true>(dev_ctx,
                                                              x,
-                                                             out_grad,
                                                              paddle::none,
+                                                             out_grad,
                                                              dims,
                                                              keep_dim,
                                                              reduce_all,
diff --git a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
index 65d903a7fe426c6eed6cba6f38e8c636001d47b0..1877a4ecc227e36cad474d8a5c11de5ff7a1946c 100644
--- a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
@@ -33,7 +33,7 @@ void FrobeniusNormGradKernel(const Context& ctx,
                              DataType out_dtype,
                              DenseTensor* dx) {
   ReduceGradKernel<Context, T, funcs::FrobeniusNormGradFunctor>(
-      ctx, x, dout, out, axis, keep_dim, reduce_all, in_dtype, out_dtype, dx);
+      ctx, x, out, dout, axis, keep_dim, reduce_all, in_dtype, out_dtype, dx);
 }
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/impl/reduce_grad.h b/paddle/phi/kernels/impl/reduce_grad.h
index f56d3d3ed50f7e72910115f7ec28914a5eade2e8..0b1c43b5f040227b691fca6bdc257fbf8c697957 100644
--- a/paddle/phi/kernels/impl/reduce_grad.h
+++ b/paddle/phi/kernels/impl/reduce_grad.h
@@ -87,8 +87,8 @@ template <typename Context,
           bool kNoNeedBufferY = false>
 void ReduceGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
-                      const DenseTensor& out_grad,
                       const paddle::optional<DenseTensor>& out,
+                      const DenseTensor& out_grad,
                       const std::vector<int64_t>& dims,
                       bool keep_dim,
                       bool reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
index 4a74416e3916492e6d3a40e09ca347db485fff7c..0a0c1abac808646db157b39ef929b007a25e6985 100644
--- a/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_max_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceMaxGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceMaxGradKernel(const Context& dev_ctx,
                          DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MaxOrMinGradFunctor>(dev_ctx,
                                                            x,
-                                                           out_grad,
                                                            out,
+                                                           out_grad,
                                                            dims,
                                                            keep_dim,
                                                            reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
index baaa544f137366f1e0343c25bc373cc08350f7fd..965fc686e27838ccc3ec73b7ae7d8c5267cd59bf 100644
--- a/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_min_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceMinGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceMinGradKernel(const Context& dev_ctx,
                          DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::MaxOrMinGradFunctor>(dev_ctx,
                                                            x,
-                                                           out_grad,
                                                            out,
+                                                           out_grad,
                                                            dims,
                                                            keep_dim,
                                                            reduce_all,
diff --git a/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h b/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
index 6b93e98cec0168ab55e15e3401a72738f79d3a07..fb361e34205582bd3478696ade10602240b7d66b 100644
--- a/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/reduce_prod_grad_kernel_impl.h
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void ReduceProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
-                          const DenseTensor& out_grad,
                           const DenseTensor& out,
+                          const DenseTensor& out_grad,
                           const std::vector<int64_t>& dims,
                           bool keep_dim,
                           bool reduce_all,
@@ -34,8 +34,8 @@ void ReduceProdGradKernel(const Context& dev_ctx,
                           DenseTensor* x_grad) {
   ReduceGradKernel<Context, T, funcs::ProdGradFunctor>(dev_ctx,
                                                        x,
-                                                       out_grad,
                                                        out,
+                                                       out_grad,
                                                        dims,
                                                        keep_dim,
                                                        reduce_all,
diff --git a/paddle/phi/kernels/reduce_grad_kernel.h b/paddle/phi/kernels/reduce_grad_kernel.h
index ee6f3d19a094d29546e82e7138933eceb96459d0..a4b472c445888fd45ff497884d309778b7447151 100644
--- a/paddle/phi/kernels/reduce_grad_kernel.h
+++ b/paddle/phi/kernels/reduce_grad_kernel.h
@@ -43,8 +43,8 @@ void ReduceMeanGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceProdGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
-                          const DenseTensor& out_grad,
                           const DenseTensor& out,
+                          const DenseTensor& out_grad,
                           const std::vector<int64_t>& dims,
                           bool keep_dim,
                           bool reduce_all,
@@ -55,8 +55,8 @@ void ReduceProdGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceMaxGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
@@ -67,8 +67,8 @@ void ReduceMaxGradKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void ReduceMinGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const DenseTensor& out_grad,
                          const DenseTensor& out,
+                         const DenseTensor& out_grad,
                          const std::vector<int64_t>& dims,
                          bool keep_dim,
                          bool reduce_all,
diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc
index 4bca0523801c1a94f90197c93cc495c2c4f56eeb..273badee623816ee32d25b52f8951dd0f854d8a4 100644
--- a/paddle/phi/ops/compat/reduce_sig.cc
+++ b/paddle/phi/ops/compat/reduce_sig.cc
@@ -149,7 +149,7 @@ KernelSignature ReduceMaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "max_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
       {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
       {GradVarName("X")});
 }
@@ -158,7 +158,7 @@ KernelSignature ReduceMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "min_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
       {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
       {GradVarName("X")});
 }
@@ -167,7 +167,7 @@ KernelSignature ReduceProdGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "prod_grad",
-      {"X", GradVarName("Out"), "Out"},
+      {"X", "Out", GradVarName("Out")},
       {"dim", "keep_dim", "reduce_all", "in_dtype", "out_dtype"},
       {GradVarName("X")});
 }