diff --git a/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc b/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc
deleted file mode 100644
index 16650043fd31a8b1834701c421fd7b8012563030..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/reduce_amax_op.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace ops = paddle::operators;
-
-class ReduceAMaxOpMaker : public ops::ReduceBaseOpMaker {
- protected:
-  virtual std::string GetName() const { return "reduce_amax"; }
-  virtual std::string GetOpType() const { return "Reduce reduce_amax"; }
-};
-
-DECLARE_INFER_SHAPE_FUNCTOR(reduce_amax,
-                            ReduceAMaxInferShapeFunctor,
-                            PD_INFER_META(phi::ReduceInferMetaBase));
-
-REGISTER_OPERATOR(
-    reduce_amax,
-    ops::ReduceBaseOp,
-    ReduceAMaxOpMaker,
-    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
-    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
-    ReduceAMaxInferShapeFunctor);
-REGISTER_OPERATOR(reduce_amax_grad, ops::ReduceGradOp)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index dfb60b46e35131bcc72a8bd2c7a093b75a72538e..8b6b6d60fdb7b55b354ccc593196bbaee99f5aaf 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -153,6 +153,24 @@
       data_type : std::string
       tensor_name : Atol
 
+- op : amax (reduce_amax)
+  backward : amax_grad (reduce_amax_grad)
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  attrs:
+    { axis : dim, keepdim : keep_dim }
+  extra :
+    attrs : [bool use_mkldnn = false]
+  int_array:
+    axis :
+      data_type : int
+      support_tensor : true
+  get_expected_kernel_type :
+    amax_grad : GetReduceGradExpectedKernelType
+  manual_signature : [amax]
+
 - op : angle
   backward : angle_grad
   inputs :
@@ -1873,11 +1891,6 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
-- op : reduce_amax
-  backward : reduce_amax_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
 - op : reduce_amin
   backward : reduce_amin_grad
   extra :
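Note on the op_compat.yaml entry above: `amax (reduce_amax)` tells the op generator to keep accepting legacy `reduce_amax` programs, renaming the `X`/`Out` variables to `x`/`out` and the `dim`/`keep_dim` attributes to `axis`/`keepdim`, while `manual_signature : [amax]` leaves the forward argument mapping hand-written (only the grad mapping is deleted further below). For orientation, a minimal sketch of the user-facing op being migrated; this call is not part of the diff and is shown only to illustrate the attribute mapping:

import paddle

x = paddle.to_tensor([[0.1, 0.9], [0.9, 0.3]])
# Reduce over the last axis and keep the reduced dimension, mirroring the
# axis/keepdim (legacy dim/keep_dim) attributes mapped in op_compat.yaml.
out = paddle.amax(x, axis=-1, keepdim=True)
print(out)  # shape [2, 1]: [[0.9], [0.9]]
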
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 68157095c91abac6eb35d2daf702f1baa32e509e..4579b11ebf525030907b65c023660bb62376df22 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -1,5 +1,15 @@
 # This file is to support those static ops different the dynamic.
 
+- backward_op : amax_grad
+  forward: amax (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : amax_grad
+
 - backward_op : assign_grad
   forward : assign (Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 24e5fa39cd748484f6cab24f4f8b0a2223048c98..f9c71d588234ce477a77c620ffbd542105711b07 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -26,6 +26,17 @@
     func : all_reduce
     param: [x, reduce_type]
 
+- op : amax
+  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceInferMeta
+    param : [x, axis, keepdim]
+  kernel :
+    func : amax_raw
+    param : [x, axis, keepdim, reduce_all]
+  backward : amax_grad
+
 - op : arange
   args : (Tensor start, Tensor end, Tensor step)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc
index a14745acb3de44e0acbf9f92bebdeceb51ee57d0..d65f550209cbf09a78fd4ec3d991fb8c33dbc26e 100644
--- a/paddle/phi/ops/compat/reduce_sig.cc
+++ b/paddle/phi/ops/compat/reduce_sig.cc
@@ -183,14 +183,6 @@ KernelSignature ReduceMaxGradOpArgumentMapping(
                          {"X@GRAD"});
 }
 
-KernelSignature ReduceAMaxGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("amax_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"dim", "keep_dim", "reduce_all"},
-                         {"X@GRAD"});
-}
-
 KernelSignature ReduceMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("min_grad",
@@ -232,7 +224,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
-PD_REGISTER_BASE_KERNEL_NAME(reduce_amax_grad, amax_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_amin_grad, amin_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
@@ -253,8 +244,6 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                            phi::ReduceProdGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                            phi::ReduceMaxGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(reduce_amax_grad,
-                           phi::ReduceAMaxGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                            phi::ReduceMinGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_amin_grad,
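With the `amax` / `amax_grad` entries in static_ops.yaml and static_backward.yaml in place, the hand-written `ReduceAMaxGradOpArgumentMapping` and its `reduce_amax_grad -> amax_grad` base-name registration become redundant: the equivalent `("amax_grad", {"X", "Out", "Out@GRAD"}, {"dim", "keep_dim", "reduce_all"}, {"X@GRAD"})` signature is now derived from the YAML plus the `amax (reduce_amax)` compat entry. A hedged smoke test of the migrated static-graph path; this sketch is not part of the diff, and the exact op type printed may vary across Paddle versions:

import paddle

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data("x", shape=[2, 3], dtype="float32")
    x.stop_gradient = False
    out = paddle.amax(x, axis=1)        # builds the YAML-generated static op
    paddle.static.gradients([out], [x])  # appends amax_grad to the program

# Inspect the forward and backward ops the generator produced.
print([op.type for op in main.global_block().ops])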