Unverified commit 6e210f92, authored by RedContritio, committed by GitHub

support auto generate for static op reduce_amin (#54187)

Parent 4c19b8c7
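This change removes the hand-written C++ registration for `reduce_amin` and drives it from the YAML op definitions instead, following the pattern already used for `amax`. The user-facing Python API is unaffected; a minimal usage sketch (tensor values are illustrative):

```python
import paddle

x = paddle.to_tensor([[1.0, 3.0], [2.0, 1.0]])
# amin reduces to the minimum along the given axis.
print(paddle.amin(x, axis=1))  # Tensor([1.0, 1.0])
```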
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace ops = paddle::operators;
class ReduceAMinOpMaker : public ops::ReduceBaseOpMaker {
 protected:
  virtual std::string GetName() const { return "reduce_amin"; }
  virtual std::string GetOpType() const { return "Reduce reduce_amin"; }
};
DECLARE_INFER_SHAPE_FUNCTOR(reduce_amin,
                            ReduceAMinInferShapeFunctor,
                            PD_INFER_META(phi::ReduceInferMetaBase));

REGISTER_OPERATOR(
    reduce_amin,
    ops::ReduceBaseOp,
    ReduceAMinOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    ReduceAMinInferShapeFunctor);

REGISTER_OPERATOR(reduce_amin_grad, ops::ReduceGradOp)
@@ -171,6 +171,24 @@
    amax_grad : GetReduceGradExpectedKernelType
  manual_signature : [amax]
- op : amin (reduce_amin)
  backward : amin_grad (reduce_amin_grad)
  inputs :
    x : X
  outputs :
    out : Out
  attrs:
    { axis : dim, keepdim : keep_dim }
  extra :
    attrs : [bool use_mkldnn = false]
  int_array:
    axis :
      data_type : int
      support_tensor : true
  get_expected_kernel_type :
    amin_grad : GetReduceGradExpectedKernelType
  manual_signature : [amin]
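This compatibility entry maps the legacy static op's input/output and attribute names (`X`, `Out`, `dim`, `keep_dim`) onto the generated op's names (`x`, `out`, `axis`, `keepdim`), and marks `axis` as an int array that may also be fed as a tensor. A hypothetical Python sketch of the renaming it encodes (names taken from the entry above, the helper itself is illustrative):

```python
# Hypothetical sketch: legacy reduce_amin names -> generated amin names.
legacy_to_new = {"X": "x", "Out": "out", "dim": "axis", "keep_dim": "keepdim"}

def translate_legacy_attrs(attrs):
    # e.g. {"dim": [0], "keep_dim": True} -> {"axis": [0], "keepdim": True}
    return {legacy_to_new.get(k, k): v for k, v in attrs.items()}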
- op : angle
  backward : angle_grad
  inputs :
@@ -1909,11 +1927,6 @@
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : reduce_amin
  backward : reduce_amin_grad
  extra :
    attrs : [bool use_mkldnn = false]
- op : reduce_any
  extra :
    attrs : [bool use_mkldnn = false]
...
@@ -10,6 +10,16 @@
  kernel :
    func : amax_grad
- backward_op : amin_grad
  forward: amin (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
  kernel :
    func : amin_grad
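The backward op takes both the forward input `x` and output `out`, which the kernel needs to locate the minimum positions. For tied minima, `amin` is documented to split the gradient evenly among the tied elements (unlike `paddle.min`, which propagates it to a single element). A small eager-mode check of that behavior:

```python
import paddle

x = paddle.to_tensor([1.0, 1.0, 2.0], stop_gradient=False)
paddle.amin(x).backward()
# Gradient is shared across the two tied minima.
print(x.grad)  # expected: [0.5, 0.5, 0.0]
```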
- backward_op : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
...
@@ -37,6 +37,17 @@
    param : [x, axis, keepdim, reduce_all]
  backward : amax_grad
- op : amin
  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
    param : [x, axis, keepdim]
  kernel :
    func : amin_raw
    param : [x, axis, keepdim, reduce_all]
  backward : amin_grad
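`ReduceInferMeta` derives the output shape from `x`, `axis`, and `keepdim` alone (the `reduce_all` flag is consumed by the raw kernel). A plain-Python sketch of that shape rule, with negative axes normalized the way reductions usually handle them; the helper name is illustrative:

```python
def reduced_shape(shape, axis, keepdim):
    # An empty axis list reduces every dimension, mirroring reduce_all.
    axes = {a % len(shape) for a in axis} if axis else set(range(len(shape)))
    out = []
    for i, d in enumerate(shape):
        if i in axes:
            if keepdim:
                out.append(1)  # reduced dims kept as size 1
        else:
            out.append(d)
    return out

print(reduced_shape([2, 3, 4], [1], keepdim=False))  # [2, 4]
print(reduced_shape([2, 3, 4], [1], keepdim=True))   # [2, 1, 4]
```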
- op : arange
  args : (Tensor start, Tensor end, Tensor step)
  output : Tensor(out)
...
@@ -191,14 +191,6 @@ KernelSignature ReduceMinGradOpArgumentMapping(
                         {"X@GRAD"});
}
KernelSignature ReduceAMinGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("amin_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}
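This hand-written mapping can be deleted because the same information now comes from the YAML entries above: the `op_compat` attrs block supplies the legacy names, and the backward definition supplies the tensors. A hypothetical illustration of the signature the generator derives, matching the deleted code:

```python
# Hypothetical illustration only: the generated reduce_amin_grad signature.
generated = dict(
    kernel="amin_grad",
    inputs=["X", "Out", "Out@GRAD"],
    attrs=["dim", "keep_dim", "reduce_all"],  # legacy names via op_compat
    outputs=["X@GRAD"],
)
```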
KernelSignature ReduceProdGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("prod_grad",
@@ -224,7 +216,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amin_grad, amin_grad);
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
@@ -246,5 +237,3 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                           phi::ReduceMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                           phi::ReduceMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amin_grad,
                           phi::ReduceAMinGradOpArgumentMapping);