Unverified commit 1e34f9e1 authored by RedContritio, committed by GitHub

support auto generate for static op reduce_max (#54286)

Parent d015ecf5
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace ops = paddle::operators;
class ReduceMaxOpMaker : public ops::ReduceBaseOpMaker {
 protected:
  virtual std::string GetName() const { return "reduce_max"; }
  virtual std::string GetOpType() const { return "Reduce reduce_max"; }
};
namespace paddle {
namespace operators {
class ReduceMaxCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
 public:
  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
  void Apply() override {
    paddle::Tensor x = this->GetSingleForwardInput("X");
    paddle::Tensor out = this->GetSingleForwardOutput("Out");
    paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
    std::vector<int> axis = this->Attr<std::vector<int>>("dim");
    bool keep_dim = this->Attr<bool>("keep_dim");
    bool reduce_all = this->Attr<bool>("reduce_all");
    paddle::Tensor x_grad_t = this->GetSingleInputGrad("X");
    paddle::Tensor* x_grad = this->GetOutputPtr(&x_grad_t);
    std::string x_grad_name = this->GetOutputName(x_grad_t);
    VLOG(6) << "Running max_grad composite func";
    prim::max_grad<prim::DescTensor>(
        x, out, out_grad, axis, keep_dim, reduce_all, x_grad);
    this->RecoverOutputName(x_grad_t, x_grad_name);
  }
};
} // namespace operators
} // namespace paddle
DECLARE_INFER_SHAPE_FUNCTOR(
    reduce_max,
    ReduceMaxInferShapeFunctor,
    PD_INFER_META(phi::ReduceIntArrayAxisInferMetaBase));

REGISTER_OPERATOR(
    reduce_max,
    ops::ReduceBaseOp,
    ReduceMaxOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    ops::ReduceMaxCompositeGradOpMaker,
    ReduceMaxInferShapeFunctor);
REGISTER_OPERATOR(reduce_max_grad, ops::ReduceGradOp)
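For context, the composite grad maker above delegates the actual differentiation rule to prim::max_grad. Below is a minimal standalone sketch of that rule (plain C++, not Paddle code; max_grad_1d and its tie handling are illustrative assumptions): the incoming out_grad flows only to input positions that equal the reduced maximum, via an (x == out) mask.

// Minimal standalone sketch of the reduce_max backward rule, not Paddle code.
// Assumption: every tied maximum receives the full incoming gradient, matching
// a mask-multiply formulation x_grad = (x == out) * out_grad.
#include <algorithm>
#include <iostream>
#include <vector>

std::vector<float> max_grad_1d(const std::vector<float>& x,
                               float out,         // forward result: max(x)
                               float out_grad) {  // gradient w.r.t. out
  std::vector<float> x_grad(x.size(), 0.0f);
  for (size_t i = 0; i < x.size(); ++i) {
    // Route the gradient to positions that attain the maximum.
    if (x[i] == out) x_grad[i] = out_grad;
  }
  return x_grad;
}

int main() {
  std::vector<float> x = {1.0f, 3.0f, 2.0f, 3.0f};
  float out = *std::max_element(x.begin(), x.end());  // forward: 3.0
  for (float g : max_grad_1d(x, out, 1.0f)) std::cout << g << ' ';  // 0 1 0 1
  std::cout << '\n';
  return 0;
}

In this sketch the tied maxima each receive the full incoming gradient; a rule that picked a single argmax would instead zero all but one of them.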
@@ -1560,11 +1560,16 @@
    { axis : dim, keepdim : keep_dim}
  outputs:
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]
  int_array:
    axis :
      data_type : int
      support_tensor : true
  get_expected_kernel_type :
    max : GetReduceExpectedKernelType
    max_grad : GetReduceGradExpectedKernelType
  manual_signature : [max]

- op : max_pool2d_with_index
  inputs :
@@ -75,6 +75,17 @@
    param : [x, out_grad]
  inplace : (out_grad -> x_grad)

- backward_op : max_grad
  forward: max (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
  kernel :
    func : max_grad
  composite: max_grad(x, out, out_grad, axis, keepdim, reduce_all, x_grad)

- backward_op : relu6_grad
  forward : relu6 (Tensor x, float threshold = 6.0f) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
@@ -262,6 +262,17 @@
    matrix_rank_tol {dense, dense -> dense}
    data_type : x

- op : max
  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(out)
  infer_meta :
    func : ReduceIntArrayAxisInferMetaBase
    param : [x, axis, keepdim, reduce_all]
  kernel :
    func : max_raw
    param : [x, axis, keepdim, reduce_all]
  backward : max_grad
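As a reading aid for the entry above: ReduceIntArrayAxisInferMetaBase encodes the standard reduction shape rule. A minimal sketch follows (plain C++, not phi's actual InferMeta API; reduce_out_shape is an illustrative name, and treating an empty axis list as reduce-all is an assumption of this sketch): reduced axes are dropped, or kept with extent 1 when keepdim is true.

// Simplified sketch of the reduction output-shape rule; not phi's InferMeta API.
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

std::vector<int64_t> reduce_out_shape(const std::vector<int64_t>& x_dims,
                                      std::vector<int64_t> axis,
                                      bool keepdim,
                                      bool reduce_all) {
  const int64_t rank = static_cast<int64_t>(x_dims.size());
  // Assumption: an empty axis list behaves like reduce_all.
  if (reduce_all || axis.empty()) {
    axis.clear();
    for (int64_t i = 0; i < rank; ++i) axis.push_back(i);
  }
  std::set<int64_t> reduced;
  for (int64_t a : axis) reduced.insert(a < 0 ? a + rank : a);  // normalize negative axes
  std::vector<int64_t> out;
  for (int64_t i = 0; i < rank; ++i) {
    if (reduced.count(i)) {
      if (keepdim) out.push_back(1);  // reduced axis kept with extent 1
    } else {
      out.push_back(x_dims[i]);  // untouched axis keeps its extent
    }
  }
  return out;
}

int main() {
  // max over axis 1 of a [2, 3, 4] tensor yields [2, 4], or [2, 1, 4] with keepdim.
  assert((reduce_out_shape({2, 3, 4}, {1}, false, false) == std::vector<int64_t>{2, 4}));
  assert((reduce_out_shape({2, 3, 4}, {1}, true, false) == std::vector<int64_t>{2, 1, 4}));
  return 0;
}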
- op : not_equal
  args : (Tensor x, Tensor y, int axis = -1, bool force_cpu=false)
  output : Tensor(out)
@@ -175,14 +175,6 @@ KernelSignature ReduceMeanGradOpArgumentMapping(
                         {"X@GRAD"});
}

KernelSignature ReduceMaxGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("max_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}

KernelSignature ReduceMinGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("min_grad",
@@ -214,7 +206,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any);
PD_REGISTER_BASE_KERNEL_NAME(reduce_sum_grad, sum_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
@@ -233,7 +224,5 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad,
                           phi::ReduceMeanGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                           phi::ReduceProdGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                           phi::ReduceMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                           phi::ReduceMinGradOpArgumentMapping);