diff --git a/paddle/fluid/operators/reduce_ops/reduce_min_max_op.h b/paddle/fluid/operators/reduce_ops/reduce_min_max_op.h
deleted file mode 100644
index a458dd09f4aaa4761cb8dac31764b7ea7f7b8c97..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/reduce_min_max_op.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
-
-namespace paddle {
-namespace operators {
-
-struct MaxFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = x->maximum(dim);
-  }
-};
-
-struct MinFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    y->device(place) = x->minimum(dim);
-  }
-};
-
-struct MaxOrMinGradFunctor {
-  template <typename DeviceContext,
-            typename X,
-            typename Y,
-            typename DX,
-            typename DY,
-            typename Dim>
-  void operator()(const DeviceContext& place,
-                  X* x,
-                  Y* y,
-                  DX* dx,
-                  DY* dy,
-                  const Dim& dim,
-                  int size) {
-    auto equals = (*x) == y->broadcast(dim);
-    auto ones = dx->constant(1);
-    auto zeros = dx->constant(0);
-    // If there are multiple minimum or maximum elements, the subgradient of
-    // each is the set [0, 1], and we pass gradient to all of them here.
-    dx->device(place) = dy->broadcast(dim) * equals.select(ones, zeros);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/reduce_ops/reduce_min_op.cc b/paddle/fluid/operators/reduce_ops/reduce_min_op.cc
deleted file mode 100644
index d755d4607227c9befbf799a91f4b14e1b433e4f7..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/reduce_min_op.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace ops = paddle::operators;
-
-class ReduceMinOpMaker : public ops::ReduceBaseOpMaker {
- protected:
-  virtual std::string GetName() const { return "reduce_min"; }
-  virtual std::string GetOpType() const { return "Reduce reduce_min"; }
-};
-
-DECLARE_INFER_SHAPE_FUNCTOR(
-    reduce_min,
-    ReduceMinInferShapeFunctor,
-    PD_INFER_META(phi::ReduceIntArrayAxisInferMetaBase));
-
-REGISTER_OPERATOR(
-    reduce_min,
-    ops::ReduceBaseOp,
-    ReduceMinOpMaker,
-    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
-    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
-    ReduceMinInferShapeFunctor);
-REGISTER_OPERATOR(reduce_min_grad, ops::ReduceGradOp)
diff --git a/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake b/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake
index a2d74832ddcc7d0fe010487c5c6082c83929812c..e761142c4304c8898eb4f1c2de4cf90cf6becbd4 100644
--- a/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake
+++ b/paddle/fluid/operators/reduce_ops/unity_build_rule.cmake
@@ -11,5 +11,3 @@ register_unity_group(cu reduce_all_op.cu reduce_any_op.cu reduce_prod_op.cu
 # compilation instruction when compiling in Unity Build.
 register_unity_group(cu frobenius_norm_op.cu)
 register_unity_group(cu logsumexp_op.cu)
-register_unity_group(cu reduce_max_op.cu)
-register_unity_group(cu reduce_min_op.cu)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 4917af23ac7f4a918c7aad8b64d6df91f9f7ba11..2c9e720aedcd36222badf5f2e832d4f10c541655 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1692,6 +1692,25 @@
     out : Out
   drop_empty_grad : [inputs_grad]
 
+- op : min (reduce_min)
+  backward : min_grad (reduce_min_grad)
+  inputs:
+    x : X
+  outputs:
+    out : Out
+  attrs:
+    { axis : dim, keepdim : keep_dim}
+  extra :
+    attrs : [bool use_mkldnn = false]
+  int_array:
+    axis :
+      data_type : int
+      support_tensor : true
+  get_expected_kernel_type :
+    min : GetReduceExpectedKernelType
+    min_grad : GetReduceGradExpectedKernelType
+  manual_signature : [min]
+
 - op : minimum (elementwise_min)
   backward : minimum_grad (elementwise_min_grad)
   extra :
@@ -2008,11 +2027,6 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
-- op : reduce_min
-  backward : reduce_min_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
 - op : relu
   backward : relu_grad, relu_double_grad (relu_grad_grad)
   inputs :
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 736dcc01722d9508c1f3c534a22ca7b0c8d730aa..97a0dd90103597e72885863817fb6e0a212d7713 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -123,6 +123,16 @@
     func : max_grad
   composite: max_grad(x, out, out_grad, axis, keepdim, reduce_all, x_grad)
 
+- backward_op : min_grad
+  forward: min (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={}, bool keepdim=false, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : min_grad
+
 - backward_op : pool2d_double_grad
   forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 27bffc64fe595387601cd3328e16c3ed48d57282..c85d4ddfb2d07dd356ec37d077265de261196762 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -309,6 +309,17 @@
     param : [x, axis, keepdim, reduce_all]
   backward : max_grad
 
+- op : min
+  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceIntArrayAxisInferMetaBase
+    param : [x, axis, keepdim, reduce_all]
+  kernel :
+    func : min_raw
+    param : [x, axis, keepdim, reduce_all]
+  backward : min_grad
+
 - op : not_equal
   args : (Tensor x, Tensor y, int axis = -1, bool force_cpu=false)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/reduce_sig.cc b/paddle/phi/ops/compat/reduce_sig.cc
index 2529724e4a78829a2ae7fcfe0057c2dd1a96c198..17cfe13b85674af2b06657a0a9cec8dda967b7de 100644
--- a/paddle/phi/ops/compat/reduce_sig.cc
+++ b/paddle/phi/ops/compat/reduce_sig.cc
@@ -167,14 +167,6 @@ KernelSignature ReduceMeanGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("mean_grad",
                          {"X", "Out@GRAD"},
                          {"dim", "keep_dim", "reduce_all"},
                          {"X@GRAD"});
 }
 
-KernelSignature ReduceMinGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("min_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"dim", "keep_dim", "reduce_all"},
-                         {"X@GRAD"});
-}
-
 KernelSignature ReduceProdGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("prod_grad",
@@ -197,7 +189,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any);
 
 PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
 PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
-PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
@@ -213,5 +204,3 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad,
                            phi::ReduceMeanGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                            phi::ReduceProdGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
-                           phi::ReduceMinGradOpArgumentMapping);
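Note on the deleted argument mapping: the `attrs : { axis : dim, keepdim : keep_dim }` block added to op_compat.yaml lets the framework translate the legacy `reduce_min_grad` op into the phi `min_grad` kernel automatically, which is presumably why the hand-written `ReduceMinGradOpArgumentMapping` and its registrations can be removed. For illustration only (the dict below is hypothetical, not a Paddle API), the auto-generated mapping should be equivalent to the deleted C++ signature:

```python
# Illustration only: the mapping the deleted ReduceMinGradOpArgumentMapping
# provided, written out as plain data. The strings are copied verbatim from
# the removed C++; the dict itself is a hypothetical stand-in.
reduce_min_grad_signature = {
    "kernel": "min_grad",                        # phi kernel to dispatch to
    "inputs": ["X", "Out", "Out@GRAD"],          # legacy operator inputs
    "attrs": ["dim", "keep_dim", "reduce_all"],  # legacy attribute names
    "outputs": ["X@GRAD"],                       # legacy operator output
}
```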
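The deleted `MaxOrMinGradFunctor` also documents the gradient semantics the phi `min_grad` kernel must preserve: when the minimum is attained at several positions, every tied element receives the full upstream gradient (the subgradient of each tie is the set [0, 1]). A minimal NumPy sketch of that rule, assuming a keepdim=false reduction over a single axis:

```python
import numpy as np

def min_grad(x, y, dy, axis):
    # Mirror of the deleted functor: dx = broadcast(dy) * (x == broadcast(y)).
    y_b = np.expand_dims(y, axis)    # broadcast the reduced min back to x's shape
    dy_b = np.expand_dims(dy, axis)  # broadcast the upstream gradient likewise
    return dy_b * (x == y_b)         # dy where x ties the min, 0 elsewhere

x = np.array([[1.0, 1.0, 2.0]])
y = x.min(axis=1)                    # -> [1.0]
dy = np.ones_like(y)
print(min_grad(x, y, dy, axis=1))    # [[1. 1. 0.]] -- both tied minima get gradient
```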
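From the Python side the migration should be invisible: `paddle.min` keeps dispatching to the phi `min`/`min_grad` kernels registered above. A quick dygraph smoke test (expected values are inferred from the functor's tie rule, not from a run of this branch):

```python
import paddle

x = paddle.to_tensor([[2.0, 1.0], [1.0, 1.0]], stop_gradient=False)
out = paddle.min(x, axis=1)  # -> [1.0, 1.0]
out.sum().backward()
# Per the tie rule above, every element equal to its row minimum should get
# gradient 1: expected x.grad is [[0., 1.], [1., 1.]].
print(x.grad)
```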