Unverified commit ac94b135, authored by RedContritio and committed by GitHub

support auto generate for static op reduce_prod (#54316)

Parent add77ccb
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace framework {
class OpDesc;
} // namespace framework
namespace imperative {
class OpBase;
} // namespace imperative
} // namespace paddle
namespace paddle {
namespace operators {
class ReduceProdCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
public:
using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
void Apply() override {
// get inputs
paddle::Tensor x = this->GetSingleForwardInput("X");
paddle::Tensor out = this->GetSingleForwardOutput("Out");
paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
// get attr
std::vector<int> axis = this->Attr<std::vector<int>>("dim");
bool keep_dim = this->Attr<bool>("keep_dim");
bool reduce_all = this->Attr<bool>("reduce_all");
// get output
paddle::Tensor x_grad_t = this->GetSingleInputGrad("X");
// get output ptr
auto x_grad = this->GetOutputPtr(&x_grad_t);
// get output original name
std::string x_grad_name = this->GetOutputName(x_grad_t);
VLOG(6) << "Running prod_grad composite func";
// call composite backward func
prim::prod_grad<prim::DescTensor>(
x, out, out_grad, axis, keep_dim, reduce_all, x_grad);
// recover output name
this->RecoverOutputName(x_grad_t, x_grad_name);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
class ReduceProdOpMaker : public ops::ReduceBaseOpMaker {
protected:
virtual std::string GetName() const { return "reduce_prod"; }
virtual std::string GetOpType() const { return "Reduce reduce_prod"; }
};
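// Tie reduce_prod's shape inference to the shared phi infer-meta function,
// the same rule the auto-generated phi op definition below uses.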
DECLARE_INFER_SHAPE_FUNCTOR(
reduce_prod,
ReduceProdInferShapeFunctor,
PD_INFER_META(phi::ReduceIntArrayAxisInferMetaBase));
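// Register the forward op with its maker, the default grad makers for static
// graph (OpDesc) and dygraph (OpBase), the composite grad maker defined
// above, and the infer-shape functor.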
REGISTER_OPERATOR(
reduce_prod,
ops::ReduceBaseOp,
ReduceProdOpMaker,
paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
ops::ReduceProdCompositeGradOpMaker,
ReduceProdInferShapeFunctor);
REGISTER_OPERATOR(reduce_prod_grad, ops::ReduceGradOp);
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
namespace paddle {
namespace operators {
struct ProdGradFunctor {
template <typename DeviceContext,
typename X,
typename Y,
typename DX,
typename DY,
typename Dim>
void operator()(const DeviceContext& place,
X* x,
Y* y,
DX* dx,
DY* dy,
const Dim& dim,
int size) {
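    // Gradient of y = prod(x): dx_i = dy * y / x_i, with dy and y broadcast
    // back across the reduced dimension `dim`.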
dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
}
};
} // namespace operators
} // namespace paddle
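The functor above encodes the product rule for a reduction: with y = ∏_i x_i, each partial derivative is ∂y/∂x_i = y / x_i, so dx = dy * y / x once dy and y are broadcast back over the reduced dimension. Note this form assumes x contains no zeros (x->inverse() would otherwise divide by zero). A minimal, Paddle-independent C++ sketch of the same rule, checked against a finite difference (all names here are illustrative):

// Standalone check of the reduce_prod gradient rule dx_i = dy * y / x_i.
// Assumes no zero elements in x, matching the x->inverse() form above.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>

double Prod(const std::vector<double>& x) {
  double y = 1.0;
  for (double v : x) y *= v;
  return y;
}

int main() {
  const std::vector<double> x = {2.0, 3.0, 4.0};
  const double dy = 1.0;     // upstream gradient dL/dy
  const double y = Prod(x);  // 24
  for (std::size_t i = 0; i < x.size(); ++i) {
    const double dx = dy * y / x[i];  // analytic gradient
    std::vector<double> x_eps = x;    // finite-difference probe
    x_eps[i] += 1e-6;
    const double fd = (Prod(x_eps) - y) / 1e-6;
    assert(std::fabs(dx - fd) < 1e-2);
    std::printf("dx[%zu] = %g (finite difference %g)\n", i, dx, fd);
  }
  return 0;
}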
@@ -4,9 +4,8 @@
# Generally, the combination rules in this file do not need to be modified.
# If there are some redefined error in compiling with the source file which
# in combination rule, you can remove the source file from the following rules.
-register_unity_group(cc reduce_all_op.cc reduce_any_op.cc reduce_prod_op.cc)
-register_unity_group(cu reduce_all_op.cu reduce_any_op.cu reduce_prod_op.cu
-                     reduce_prod_op.part.cu)
+register_unity_group(cc reduce_all_op.cc reduce_any_op.cc)
+register_unity_group(cu reduce_all_op.cu reduce_any_op.cu)
# The following groups are to make better use of `/MP` which MSVC's parallel
# compilation instruction when compiling in Unity Build.
register_unity_group(cu frobenius_norm_op.cu)
@@ -2029,15 +2029,20 @@
  backward : prod_grad (reduce_prod_grad)
  inputs:
    x : X
-  attrs:
-    { dims : dim, keep_dim : keep_dim}
  outputs:
    out : Out
+  attrs:
+    { dims : dim, keep_dim : keep_dim}
  int_array:
    dims :
      data_type : int
      support_tensor : true
  extra :
    attrs : [bool use_mkldnn = false]
+  get_expected_kernel_type :
+    prod : GetReduceExpectedKernelType
+    prod_grad : GetReduceGradExpectedKernelType
  manual_signature : [prod]

- op : put_along_axis
  backward : put_along_axis_grad
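The op_compat.yaml entry above records how the legacy operator's names map onto the phi names used by the generated code: reduce_prod becomes prod, the attribute dim becomes dims (an IntArray that may also be fed as a Tensor at runtime, per support_tensor), and the I/O names X/Out become x/out. A tiny illustrative table of those renames (not a real Paddle structure):

#include <cstdio>
#include <map>
#include <string>

int main() {
  // Renames declared by the op_compat entry above (illustrative only).
  const std::map<std::string, std::string> legacy_to_phi = {
      {"reduce_prod", "prod"},            // forward op name
      {"reduce_prod_grad", "prod_grad"},  // backward op name
      {"dim", "dims"},                    // attribute name
      {"X", "x"},                         // input name
      {"Out", "out"},                     // output name
  };
  for (const auto& kv : legacy_to_phi)
    std::printf("%s -> %s\n", kv.first.c_str(), kv.second.c_str());
  return 0;
}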
@@ -222,6 +222,17 @@
    func : pool3d_grad
    param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]

+- backward_op : prod_grad
+  forward : prod (Tensor x, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false, int in_dtype=-1, DataType out_dtype=DataType::UNDEFINED) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : prod_grad
+  composite: prod_grad(x, out, out_grad, dims, keep_dim, reduce_all, x_grad)

- backward_op : relu6_grad
  forward : relu6 (Tensor x, float threshold = 6.0f) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
@@ -433,6 +433,18 @@
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  backward : pool3d_grad

+- op : prod
+  args : (Tensor x, IntArray dims={0}, bool keep_dim=false, bool reduce_all=false, int in_dtype=-1, DataType out_dtype=DataType::UNDEFINED)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceIntArrayAxisInferMetaBase
+    param : [x, dims, keep_dim, reduce_all, out_dtype]
+  kernel :
+    func : prod
+    param : [x, dims, keep_dim, reduce_all, out_dtype]
+    data_type : x
+  backward : prod_grad

- op : randint
  args : (int low, int high, IntArray shape = {}, DataType dtype = DataType::INT64, int seed = 0)
  output : Tensor(out)
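In the static_ops.yaml entry, ReduceIntArrayAxisInferMetaBase derives the output shape of prod from x, dims, keep_dim, and reduce_all. A simplified, self-contained sketch of that shape rule (the real infer-meta also sets the output dtype via out_dtype and handles further edge cases):

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified reduce-shape rule: reduced axes are dropped, or kept as size 1
// when keep_dim is true; reduce_all (or an empty dims) reduces every axis.
std::vector<int64_t> ReducedShape(const std::vector<int64_t>& x_shape,
                                  const std::vector<int64_t>& dims,
                                  bool keep_dim,
                                  bool reduce_all) {
  const int64_t rank = static_cast<int64_t>(x_shape.size());
  std::vector<bool> reduced(x_shape.size(), reduce_all || dims.empty());
  for (int64_t d : dims) reduced[(d + rank) % rank] = true;  // allow negative axes
  std::vector<int64_t> out;
  for (std::size_t i = 0; i < x_shape.size(); ++i) {
    if (reduced[i]) {
      if (keep_dim) out.push_back(1);
    } else {
      out.push_back(x_shape[i]);
    }
  }
  return out;
}

int main() {
  // prod over axis 0 of a 2x3 tensor -> shape {3}; with keep_dim -> {1, 3}.
  assert((ReducedShape({2, 3}, {0}, false, false) == std::vector<int64_t>{3}));
  assert((ReducedShape({2, 3}, {0}, true, false) == std::vector<int64_t>{1, 3}));
  assert((ReducedShape({2, 3}, {}, false, true).empty()));
  return 0;
}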
@@ -167,14 +167,6 @@ KernelSignature ReduceMeanGradOpArgumentMapping(
{"X@GRAD"});
}
-KernelSignature ReduceProdGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("prod_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"dim", "keep_dim", "reduce_all"},
-                         {"X@GRAD"});
-}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum);
@@ -188,7 +180,6 @@ PD_REGISTER_BASE_KERNEL_NAME(reduce_all, all);
PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any);
PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
-PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
@@ -202,5 +193,3 @@ PD_REGISTER_ARG_MAPPING_FN(reduce_any, phi::ReduceAnyOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad,
phi::ReduceMeanGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
-                           phi::ReduceProdGradOpArgumentMapping);
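With the yaml entries above in place, the argument mapping for prod_grad is emitted by the static-op code generator rather than written by hand, which is why the function and registrations above are deleted. For reference, the generated mapping is equivalent to the deleted one — a fragment in the style of the surrounding file, assuming the generator's standard pattern for plain (non-tensor) attributes:

// Illustrative fragment only: the real mapping is produced by the op
// generator from static_ops.yaml, static_backward.yaml, and op_compat.yaml.
KernelSignature ProdGradOpArgumentMapping(
    const ArgumentMappingContext& /*ctx*/) {
  return KernelSignature("prod_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}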