Unverified commit 397758fb, authored by RedContritio, committed by GitHub

support auto generate for static op reduce_min (#54315)

Parent bfbc2263
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
namespace paddle {
namespace operators {
struct MaxFunctor {
  template <typename DeviceContext, typename X, typename Y, typename Dim>
  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
    y->device(place) = x->maximum(dim);
  }
};

struct MinFunctor {
  template <typename DeviceContext, typename X, typename Y, typename Dim>
  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
    y->device(place) = x->minimum(dim);
  }
};

struct MaxOrMinGradFunctor {
  template <typename DeviceContext,
            typename X,
            typename Y,
            typename DX,
            typename DY,
            typename Dim>
  void operator()(const DeviceContext& place,
                  X* x,
                  Y* y,
                  DX* dx,
                  DY* dy,
                  const Dim& dim,
                  int size) {
    auto equals = (*x) == y->broadcast(dim);
    auto ones = dx->constant(1);
    auto zeros = dx->constant(0);
    // If there are multiple minimum or maximum elements, the subgradient of
    // each is the set [0, 1], and we pass gradient to all of them here.
    dx->device(place) = dy->broadcast(dim) * equals.select(ones, zeros);
  }
};
} // namespace operators
} // namespace paddle
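The comment inside MaxOrMinGradFunctor is the key semantic detail of this kernel: when the minimum (or maximum) is attained at several positions, every tied element receives the full upstream gradient rather than a share of it. Below is a minimal standalone sketch of that behavior for a full reduction, based only on the functor shown above; the helper name MinGradAllTies is illustrative and not part of the patch.

#include <algorithm>
#include <iostream>
#include <vector>

// Illustrative helper (hypothetical name): mirrors the "pass gradient to all
// tied elements" behavior of MaxOrMinGradFunctor for a full min reduction.
std::vector<float> MinGradAllTies(const std::vector<float>& x,
                                  float out,         // result of the min reduction
                                  float out_grad) {  // upstream gradient dL/d(out)
  std::vector<float> x_grad(x.size(), 0.0f);
  for (std::size_t i = 0; i < x.size(); ++i) {
    // equals.select(ones, zeros): 1 where x[i] == out, otherwise 0.
    x_grad[i] = (x[i] == out) ? out_grad : 0.0f;
  }
  return x_grad;
}

int main() {
  std::vector<float> x = {1.0f, 3.0f, 1.0f, 2.0f};
  float out = *std::min_element(x.begin(), x.end());            // out == 1.0
  std::vector<float> dx = MinGradAllTies(x, out, /*out_grad=*/1.0f);
  for (float v : dx) std::cout << v << " ";                      // prints 1 0 1 0
  std::cout << std::endl;
  return 0;
}

Here dx comes out as {1, 0, 1, 0}: both tied minima get the whole upstream gradient, matching dy->broadcast(dim) * equals.select(ones, zeros) in the functor.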
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace ops = paddle::operators;
class ReduceMinOpMaker : public ops::ReduceBaseOpMaker {
 protected:
  virtual std::string GetName() const { return "reduce_min"; }
  virtual std::string GetOpType() const { return "Reduce reduce_min"; }
};

DECLARE_INFER_SHAPE_FUNCTOR(
    reduce_min,
    ReduceMinInferShapeFunctor,
    PD_INFER_META(phi::ReduceIntArrayAxisInferMetaBase));

REGISTER_OPERATOR(
    reduce_min,
    ops::ReduceBaseOp,
    ReduceMinOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    ReduceMinInferShapeFunctor);
REGISTER_OPERATOR(reduce_min_grad, ops::ReduceGradOp)
......@@ -11,5 +11,3 @@ register_unity_group(cu reduce_all_op.cu reduce_any_op.cu reduce_prod_op.cu
# compilation instruction when compiling in Unity Build.
register_unity_group(cu frobenius_norm_op.cu)
register_unity_group(cu logsumexp_op.cu)
register_unity_group(cu reduce_max_op.cu)
register_unity_group(cu reduce_min_op.cu)
......@@ -1692,6 +1692,25 @@
    out : Out
  drop_empty_grad : [inputs_grad]

- op : min (reduce_min)
  backward : min_grad (reduce_min_grad)
  inputs:
    x : X
  outputs:
    out : Out
  attrs:
    { axis : dim, keepdim : keep_dim}
  extra :
    attrs : [bool use_mkldnn = false]
  int_array:
    axis :
      data_type : int
      support_tensor : true
  get_expected_kernel_type :
    min : GetReduceExpectedKernelType
    min_grad : GetReduceGradExpectedKernelType
  manual_signature : [min]

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
  extra :
......@@ -2008,11 +2027,6 @@
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : reduce_min
  backward : reduce_min_grad
  extra :
    attrs : [bool use_mkldnn = false]

- op : relu
  backward : relu_grad, relu_double_grad (relu_grad_grad)
  inputs :
......
......@@ -123,6 +123,16 @@
    func : max_grad
  composite: max_grad(x, out, out_grad, axis, keepdim, reduce_all, x_grad)

- backward_op : min_grad
  forward: min (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
  kernel :
    func : min_grad
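As a reviewer's note (not part of the generated file), the semantics behind this entry, assuming the phi min_grad kernel keeps the behavior of MaxOrMinGradFunctor shown earlier, are that the gradient is routed only to elements equal to the reduced result:

$$\frac{\partial L}{\partial x_i} = \frac{\partial L}{\partial \mathrm{out}} \cdot \mathbf{1}[\,x_i = \mathrm{out}\,],$$

with the indicator broadcast over the reduced axis dimensions, so every tied minimum receives the full upstream gradient.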
- backward_op : pool2d_double_grad
  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
......
......@@ -309,6 +309,17 @@
    param : [x, axis, keepdim, reduce_all]
  backward : max_grad

- op : min
  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(out)
  infer_meta :
    func : ReduceIntArrayAxisInferMetaBase
    param : [x, axis, keepdim, reduce_all]
  kernel :
    func : min_raw
    param : [x, axis, keepdim, reduce_all]
  backward : min_grad

- op : not_equal
  args : (Tensor x, Tensor y, int axis = -1, bool force_cpu=false)
  output : Tensor(out)
......
......@@ -167,14 +167,6 @@ KernelSignature ReduceMeanGradOpArgumentMapping(
{"X@GRAD"});
}
KernelSignature ReduceMinGradOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature("min_grad",
{"X", "Out", "Out@GRAD"},
{"dim", "keep_dim", "reduce_all"},
{"X@GRAD"});
}
KernelSignature ReduceProdGradOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature("prod_grad",
......@@ -197,7 +189,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any);
PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
......@@ -213,5 +204,3 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad,
                           phi::ReduceMeanGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                           phi::ReduceProdGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                           phi::ReduceMinGradOpArgumentMapping);