Unverified commit 455a6735, authored by RedContritio, committed by GitHub

support auto generate for static op reduce_amax (#54179)

* support auto generate for static op reduce_amax

* set reduce_amax attr 'axis' type as IntArray
Parent 5c0b60ae
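For context, a minimal dynamic-graph usage sketch of the operator this commit migrates (an illustration only, assuming Paddle >= 2.3, where `paddle.amax` is the public entry point):

```python
import paddle

x = paddle.to_tensor([[0.1, 0.9], [0.9, 0.7]])

# `axis` corresponds to the legacy `dim` attribute; this commit retypes it as
# IntArray so that static graphs can also feed it as a tensor (see
# `support_tensor : true` in the op_compat.yaml hunk below).
out = paddle.amax(x, axis=1, keepdim=True)
print(out)  # shape [2, 1]: [[0.9], [0.9]]
```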
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
// NOTE: this hand-written maker and registration of reduce_amax is what the
// commit deletes; the YAML entries in the hunks below now carry the same
// information, from which the static op registration is auto-generated.
namespace ops = paddle::operators;

class ReduceAMaxOpMaker : public ops::ReduceBaseOpMaker {
 protected:
  virtual std::string GetName() const { return "reduce_amax"; }
  virtual std::string GetOpType() const { return "Reduce reduce_amax"; }
};

DECLARE_INFER_SHAPE_FUNCTOR(reduce_amax,
                            ReduceAMaxInferShapeFunctor,
                            PD_INFER_META(phi::ReduceInferMetaBase));

REGISTER_OPERATOR(
    reduce_amax,
    ops::ReduceBaseOp,
    ReduceAMaxOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    ReduceAMaxInferShapeFunctor);

REGISTER_OPERATOR(reduce_amax_grad, ops::ReduceGradOp)
@@ -153,6 +153,24 @@
      data_type : std::string
      tensor_name : Atol

- op : amax (reduce_amax)
  backward : amax_grad (reduce_amax_grad)
  inputs :
    x : X
  outputs :
    out : Out
  attrs :
    { axis : dim, keepdim : keep_dim }
  extra :
    attrs : [bool use_mkldnn = false]
  int_array :
    axis :
      data_type : int
      support_tensor : true
  get_expected_kernel_type :
    amax_grad : GetReduceGradExpectedKernelType
  manual_signature : [amax]

- op : angle
  backward : angle_grad
  inputs :
@@ -1873,11 +1891,6 @@
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_amax
  backward : reduce_amax_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : reduce_amin
  backward : reduce_amin_grad
  extra :

# This file is to support those static ops that differ from the dynamic ones.
- backward_op : amax_grad
  forward : amax (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : amax_grad

- backward_op : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
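The `amax_grad` entry above binds the static backward op to the `amax_grad` kernel. What distinguishes `amax` from plain `max` is its gradient convention for ties; a small dynamic-graph sketch of the documented behavior:

```python
import paddle

# amax evenly splits the gradient among tied maxima (documented paddle.amax
# behavior), which is what the amax_grad kernel implements.
x = paddle.to_tensor([1.0, 2.0, 2.0], stop_gradient=False)
paddle.amax(x).backward()
print(x.grad)  # [0. , 0.5, 0.5]
```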
@@ -26,6 +26,17 @@
    func : all_reduce
    param : [x, reduce_type]

- op : amax
  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
    param : [x, axis, keepdim]
  kernel :
    func : amax_raw
    param : [x, axis, keepdim, reduce_all]
  backward : amax_grad

- op : arange
  args : (Tensor start, Tensor end, Tensor step)
  output : Tensor(out)
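One way to exercise the now auto-generated static op is to build and run a small static program; a sketch assuming the standard `paddle.static` API:

```python
import numpy as np
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data("x", shape=[2, 3], dtype="float32")
    out = paddle.amax(x, axis=-1)  # lowers to the generated amax static op

exe = paddle.static.Executor(paddle.CPUPlace())
res, = exe.run(main,
               feed={"x": np.arange(6, dtype="float32").reshape(2, 3)},
               fetch_list=[out])
print(res)  # [2. 5.]
```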
@@ -183,14 +183,6 @@ KernelSignature ReduceMaxGradOpArgumentMapping(
                         {"X@GRAD"});
}

KernelSignature ReduceAMaxGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("amax_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}

KernelSignature ReduceMinGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("min_grad",
@@ -232,7 +224,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amax_grad, amax_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amin_grad, amin_grad);
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
@@ -253,8 +244,6 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                           phi::ReduceProdGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                           phi::ReduceMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amax_grad,
                           phi::ReduceAMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                           phi::ReduceMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amin_grad,