From 455a67358cbcfe14b53560acc17abc4bf363c55d Mon Sep 17 00:00:00 2001
From: RedContritio
Date: Wed, 31 May 2023 22:31:01 +0800
Subject: [PATCH] support auto generate for static op reduce_amax (#54179)

* support auto generate for static op reduce_amax

* set reduce_amax attr 'axis' type as IntArray
---
 .../operators/reduce_ops/reduce_amax_op.cc | 38 -------------------
 paddle/phi/api/yaml/op_compat.yaml         | 23 ++++++++---
 paddle/phi/api/yaml/static_backward.yaml   | 10 +++++
 paddle/phi/api/yaml/static_ops.yaml        | 11 ++++++
 paddle/phi/ops/compat/reduce_sig.cc        | 11 ------
 5 files changed, 39 insertions(+), 54 deletions(-)
 delete mode 100644 paddle/fluid/operators/reduce_ops/reduce_amax_op.cc

diff --git a/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc b/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc
deleted file mode 100644
index 16650043fd3..00000000000
--- a/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace ops = paddle::operators;
-
-class ReduceAMaxOpMaker : public ops::ReduceBaseOpMaker {
- protected:
-  virtual std::string GetName() const { return "reduce_amax"; }
-  virtual std::string GetOpType() const { return "Reduce reduce_amax"; }
-};
-
-DECLARE_INFER_SHAPE_FUNCTOR(reduce_amax,
-                            ReduceAMaxInferShapeFunctor,
-                            PD_INFER_META(phi::ReduceInferMetaBase));
-
-REGISTER_OPERATOR(
-    reduce_amax,
-    ops::ReduceBaseOp,
-    ReduceAMaxOpMaker,
-    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
-    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
-    ReduceAMaxInferShapeFunctor);
-REGISTER_OPERATOR(reduce_amax_grad, ops::ReduceGradOp)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index dfb60b46e35..8b6b6d60fdb 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -153,6 +153,24 @@
       data_type : std::string
       tensor_name : Atol
 
+- op : amax (reduce_amax)
+  backward : amax_grad (reduce_amax_grad)
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  attrs:
+    { axis : dim, keepdim : keep_dim }
+  extra :
+    attrs : [bool use_mkldnn = false]
+  int_array:
+    axis :
+      data_type : int
+      support_tensor : true
+  get_expected_kernel_type :
+    amax_grad : GetReduceGradExpectedKernelType
+  manual_signature : [amax]
+
 - op : angle
   backward : angle_grad
   inputs :
@@ -1873,11 +1891,6 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
-- op : reduce_amax
-  backward : reduce_amax_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
 - op : reduce_amin
   backward : reduce_amin_grad
   extra :
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 68157095c91..4579b11ebf5 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -1,5 +1,15 @@
 # This file is to support those static ops different the dynamic.
 
+- backward_op : amax_grad
+  forward: amax (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : amax_grad
+
 - backward_op : assign_grad
   forward : assign (Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 24e5fa39cd7..f9c71d58823 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -26,6 +26,17 @@
     func : all_reduce
     param: [x, reduce_type]
 
+- op : amax
+  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceInferMeta
+    param : [x, axis, keepdim]
+  kernel :
+    func : amax_raw
+    param : [x, axis, keepdim, reduce_all]
+  backward : amax_grad
+
 - op : arange
   args : (Tensor start, Tensor end, Tensor step)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc
index a14745acb3d..d65f550209c 100644
--- a/paddle/phi/ops/compat/reduce_sig.cc
+++ b/paddle/phi/ops/compat/reduce_sig.cc
@@ -183,14 +183,6 @@ KernelSignature ReduceMaxGradOpArgumentMapping(
                          {"X@GRAD"});
 }
 
-KernelSignature ReduceAMaxGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("amax_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"dim", "keep_dim", "reduce_all"},
-                         {"X@GRAD"});
-}
-
 KernelSignature ReduceMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("min_grad",
@@ -232,7 +224,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
-PD_REGISTER_BASE_KERNEL_NAME(reduce_amax_grad, amax_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_amin_grad, amin_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
@@ -253,8 +244,6 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                            phi::ReduceProdGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                            phi::ReduceMaxGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(reduce_amax_grad,
-                           phi::ReduceAMaxGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                            phi::ReduceMinGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_amin_grad,
--
GitLab
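
Below is a minimal sketch (not part of the patch) of how the migrated op can be exercised from Python's static-graph mode. The input values, variable names, and CPU place are illustrative assumptions; the patch itself only moves the reduce_amax registration from handwritten C++ to the generated YAML path, so user-visible behavior via paddle.amax should be unchanged.

# Illustrative check, not part of the patch: exercises the auto-generated
# static reduce_amax op through the public paddle.amax API.
import numpy as np
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
    # After this patch the op's 'axis' attribute is an IntArray with
    # support_tensor: true, so it could also be fed as a tensor at runtime.
    out = paddle.amax(x, axis=1, keepdim=False)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
x_np = np.array([[0.1, 0.9, 0.9], [0.7, 0.2, 0.2]], dtype="float32")
(res,) = exe.run(main_prog, feed={"x": x_np}, fetch_list=[out])
print(res)  # expected: [0.9 0.7]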