未验证 提交 877948fa 编写于 作者: R RedContritio 提交者: GitHub

support auto generate for static op reduce_any (#54284)

* decouple reduce_any_op.h and reduce_op.h from reduce_any_op.cc

* support auto generate for static op reduce_any
上级 1e34f9e1
...@@ -109,6 +109,15 @@ phi::KernelKey GetReduceGradExpectedKernelType( ...@@ -109,6 +109,15 @@ phi::KernelKey GetReduceGradExpectedKernelType(
return phi::KernelKey(input_data_type, ctx.GetPlace()); return phi::KernelKey(input_data_type, ctx.GetPlace());
} }
// Kernel-key chooser for reduce ops whose kernel must run on the same place
// as the input tensor "X" (keeps device selection consistent with the
// compare and logical ops).
phi::KernelKey GetReduceOpUseInputPlaceExpectedKernelType(
    const framework::ExecutionContext& ctx,
    const framework::OperatorWithKernel* op_ptr) {
  // Start from the framework's default kernel key (data type, layout, ...).
  phi::KernelKey kernel_key =
      op_ptr->OperatorWithKernel::GetExpectedKernelType(ctx);
  // Override only the backend so the kernel runs where the input lives.
  const phi::DenseTensor* x = ctx.Input<phi::DenseTensor>("X");
  kernel_key.set_backend(phi::TransToPhiBackend(x->place()));
  return kernel_key;
}
phi::KernelKey GetAssignExpectedKernelType( phi::KernelKey GetAssignExpectedKernelType(
const framework::ExecutionContext& ctx, const framework::ExecutionContext& ctx,
const framework::OperatorWithKernel* op_ptr) { const framework::OperatorWithKernel* op_ptr) {
......
...@@ -32,6 +32,10 @@ phi::KernelKey GetReduceGradExpectedKernelType( ...@@ -32,6 +32,10 @@ phi::KernelKey GetReduceGradExpectedKernelType(
const framework::ExecutionContext& ctx, const framework::ExecutionContext& ctx,
const framework::OperatorWithKernel* op_ptr); const framework::OperatorWithKernel* op_ptr);
phi::KernelKey GetReduceOpUseInputPlaceExpectedKernelType(
const framework::ExecutionContext& ctx,
const framework::OperatorWithKernel* op_ptr);
phi::KernelKey GetAssignExpectedKernelType( phi::KernelKey GetAssignExpectedKernelType(
const framework::ExecutionContext& ctx, const framework::ExecutionContext& ctx,
const framework::OperatorWithKernel* op_ptr); const framework::OperatorWithKernel* op_ptr);
......
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_any_op.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace framework {
class OpDesc;
template <typename T>
class EmptyGradOpMaker;
} // namespace framework
namespace imperative {
class OpBase;
} // namespace imperative
} // namespace paddle
// Wire reduce_any's static-graph shape inference to the shared phi meta
// function ReduceInferMetaBase (the same InferMeta used by other reduce ops).
DECLARE_INFER_SHAPE_FUNCTOR(reduce_any,
                            ReduceAnyInferShapeFunctor,
                            PD_INFER_META(phi::ReduceInferMetaBase));
class ReduceAnyOpMaker : public ops::ReduceBaseOpMaker {
protected:
virtual std::string GetName() const { return "reduce_any"; }
virtual std::string GetOpType() const { return "Reduce reduce_any"; }
};
// Register the static-graph reduce_any operator.
// kernel's device type is decided by input tensor place, to be consistent with
// compare and logical ops
REGISTER_OPERATOR(
    reduce_any,
    ops::ReduceOpUseInputPlace,  // op class: kernel place follows input "X"
    ReduceAnyOpMaker,            // proto maker (op name + doc strings)
    // reduce_any has no gradient: register empty grad makers for both the
    // static-graph (OpDesc) and imperative (OpBase) paths.
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
    ReduceAnyInferShapeFunctor);  // InferShape via phi::ReduceInferMetaBase
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
namespace paddle {
namespace operators {
// Reduction functor plugged into the generic reduce-kernel machinery:
// logical "any" — the output is true iff any element along `dim` is true.
struct AnyFunctor {
  // place: Eigen device to evaluate on; x: input Eigen tensor expression;
  // y: output expression; dim: dimension(s) to reduce over.
  template <typename DeviceContext, typename X, typename Y, typename Dim>
  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
    // Evaluate the Eigen expression x->any(dim) on `place`, assigning into y.
    y->device(place) = x->any(dim);
  }
};
} // namespace operators
} // namespace paddle
...@@ -198,6 +198,23 @@ ...@@ -198,6 +198,23 @@
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
# Compatibility mapping for the legacy static op `reduce_any` onto the
# canonical op name `any`.
- op : any (reduce_any)
  # Argument-name translations: new yaml name -> legacy fluid name.
  inputs :
    x : X
  outputs :
    out : Out
  attrs:
    { axis : dim, keepdim : keep_dim }
  # Extra attribute kept for backward compatibility with existing programs.
  extra :
    attrs : [bool use_mkldnn = false]
  # `axis` is an int array and may also be supplied as a tensor at runtime.
  int_array:
    axis :
      data_type : int
      support_tensor : true
  # Use the shared helper that pins the kernel to the input tensor's place
  # (GetReduceOpUseInputPlaceExpectedKernelType, defined in C++).
  get_expected_kernel_type :
    any : GetReduceOpUseInputPlaceExpectedKernelType
  # Kernel signature is written by hand rather than auto-derived.
  manual_signature : [any]
- op : arange(range) - op : arange(range)
inputs : inputs :
{start : Start, end : End, step : Step} {start : Start, end : End, step : Step}
...@@ -1940,10 +1957,6 @@ ...@@ -1940,10 +1957,6 @@
extra : extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false] attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : reduce_any
extra :
attrs : [bool use_mkldnn = false]
- op : reduce_min - op : reduce_min
backward : reduce_min_grad backward : reduce_min_grad
extra : extra :
......
...@@ -48,6 +48,16 @@ ...@@ -48,6 +48,16 @@
param : [x, axis, keepdim, reduce_all] param : [x, axis, keepdim, reduce_all]
backward : amin_grad backward : amin_grad
# Static-graph definition of the `any` (logical any-reduction) operator,
# replacing the hand-written registration in reduce_any_op.cc.
- op : any
  args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
  output : Tensor(out)
  # Shape inference: shared reduce meta; in_dtype/out_dtype are not used here.
  infer_meta :
    func : ReduceInferMetaBase
    param : [x, axis, keepdim, reduce_all]
  # Dispatch to the raw phi kernel; same reduced parameter list.
  kernel :
    func : any_raw
    param : [x, axis, keepdim, reduce_all]
- op : arange - op : arange
args : (Tensor start, Tensor end, Tensor step) args : (Tensor start, Tensor end, Tensor step)
output : Tensor(out) output : Tensor(out)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册