Unverified commit 4277f61f, authored by huangjiyi, committed by GitHub

Support code generation for op fill_any (#54378)

* update

* update
Parent ded7d190
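For context, `fill_any` is the legacy name of the op behind Paddle's in-place `Tensor.fill_` API, which overwrites every element of a tensor with a given value. A minimal usage sketch (a hedged example assuming a Paddle 2.x dygraph session; the public entry point itself is not changed by this commit):

import paddle

x = paddle.zeros([2, 3])
x.fill_(1.5)      # in-place: every element of x becomes 1.5
print(x.numpy())  # [[1.5 1.5 1.5] [1.5 1.5 1.5]]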
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {

// Declares fill_any's interface for the legacy op registry: one input X,
// one output Out, and the fill value split across a float and an int attribute.
class FillAnyOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) The input tensor.");
    AddOutput("Out", "(Tensor) The tensor filled with the given value.");
    AddAttr<float>("value_float", "The float value to fill the tensor with")
        .SetDefault(0);
    AddAttr<int>("value_int", "The int value to fill the tensor with")
        .SetDefault(0);
    AddComment(R"DOC(Fill operator with backward;
Fill a tensor with `value`.
)DOC");
  }
};

class FillAnyOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

class FillAnyGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

// Builds fill_any_grad from the forward op: it consumes Out@GRAD, produces
// X@GRAD, and reuses the forward op's attributes.
template <typename T>
class FillAnyGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType(this->ForwardOpType() + "_grad");
    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    retv->SetAttrMap(this->Attrs());
  }
};

DECLARE_INPLACE_OP_INFERER(FillAnyOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(FillAnyGradInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;

DECLARE_INFER_SHAPE_FUNCTOR(fill_any,
                            FillAnyInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(fill_any_grad,
                            FillAnyGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(fill_any,
                  ops::FillAnyOp,
                  ops::FillAnyOpMaker,
                  ops::FillAnyGradOpMaker<paddle::framework::OpDesc>,
                  ops::FillAnyGradOpMaker<paddle::imperative::OpBase>,
                  ops::FillAnyOpInplaceInferer,
                  FillAnyInferShapeFunctor);

REGISTER_OPERATOR(fill_any_grad,
                  ops::FillAnyGradOp,
                  ops::FillAnyGradInplaceInferer,
                  FillAnyGradInferShapeFunctor);
@@ -744,6 +744,17 @@
     func : fill_diagonal_tensor_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : fill_grad
+  forward : fill (Tensor x, Scalar value=0) -> Tensor(out)
+  args : (Tensor out_grad, Scalar value)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : fill_grad
+  inplace : (out_grad -> x_grad)
+
 - backward_op : flash_attn_grad
   forward : flash_attn (Tensor q, Tensor k, Tensor v, Tensor fixed_seed_offset, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") -> Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset)
   args : (Tensor q, Tensor k, Tensor v, Tensor out, Tensor softmax_lse, Tensor seed_offset, Tensor out_grad, float dropout = 0.0, bool causal = false)
......
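The entry above encodes the op's calculus: `fill`'s output is independent of its input, so `fill_grad` produces an all-zero gradient for `x`. A dygraph sketch of that behavior (a hedged example, assuming in-place `fill_` participates in autograd as registered here):

import paddle

x = paddle.rand([2, 3])
x.stop_gradient = False
y = x * 2.0         # build a gradient path through x
y.fill_(3.0)        # overwrite y; the result no longer depends on x
y.sum().backward()
print(x.grad)       # all zeros: fill_grad zeroes the incoming gradient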
@@ -282,17 +282,6 @@
     func : UnchangedInferMeta
   invoke : zeros_like(out_grad)
 
-- backward_op : fill_grad
-  forward : fill (Tensor x, Scalar value) -> Tensor(out)
-  args : (Tensor out_grad, Scalar value)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [out_grad]
-  kernel :
-    func : fill_grad
-  inplace : (out_grad -> x_grad)
-
 - backward_op : fmin_grad
   forward : fmin(Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
......
@@ -342,17 +342,6 @@
     data_type : dtype
     backend : place
 
-- op : fill
-  args : (Tensor x, Scalar value)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : fill
-  inplace : (x -> out)
-  backward: fill_grad
-
 - op : floor_divide
   args : (Tensor x, Tensor y)
   output : Tensor(out)
......
@@ -981,6 +981,17 @@
   inputs: {x: X}
   outputs: {out: Out}
 
+- op : fill (fill_any)
+  backward : fill_grad (fill_any_grad)
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  scalar :
+    value :
+      data_type : float
+      support_tensor : true
+
 - op : fill_diagonal
   backward : fill_diagonal_grad
   inputs :
......
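The `scalar` block above is what lets the generator bridge the legacy attribute pair (`value_float`, `value_int`) and the new single `Scalar value` argument: the scalar is read as a float, and `support_tensor : true` additionally allows the fill value to arrive as a Tensor at runtime. A hedged sketch of the tensor-valued form (assuming the public `fill_` forwards a Tensor value unchanged):

import paddle

x = paddle.zeros([4])
v = paddle.to_tensor(2.5)  # 0-D tensor carrying the fill value
x.fill_(v)                 # support_tensor : true makes a Tensor-backed Scalar legal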
@@ -801,6 +801,17 @@
     func : fft_r2c
   backward : fft_r2c_grad
 
+- op : fill
+  args : (Tensor x, Scalar value=0)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : fill
+  inplace : (x -> out)
+  backward: fill_grad
+
 - op : fill_diagonal
   args : (Tensor x, float value=0, int offset=0, bool wrap=false)
   output : Tensor(out)
......
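Note the `inplace : (x -> out)` mapping in the new entry: the generated API may write the result into its input's buffer rather than allocating a fresh output. A small sketch (an assumption that `fill_` follows Paddle's convention of returning its modified input):

import paddle

x = paddle.zeros([3])
y = x.fill_(7.0)  # out aliases x under the in-place mapping
print(y is x)     # expected: True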
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature FillOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature("fill", {"X"}, {"value_float"}, {"Out"});
}
KernelSignature FillGradOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature(
"fill_grad", {"Out@GRAD"}, {"value_float"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(fill_any, fill);
PD_REGISTER_BASE_KERNEL_NAME(fill_any_grad, fill_grad);
PD_REGISTER_ARG_MAPPING_FN(fill_any, phi::FillOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(fill_any_grad, phi::FillGradOpArgumentMapping);
@@ -20,10 +20,8 @@ from eager_op_test import OpTest
 import paddle
 
 
-def fill_any_wrapper(x, value_float=0, value_int=0):
-    return paddle._legacy_C_ops.fill_any(
-        x, "value_float", value_float, "value_int", value_int
-    )
+def fill_any_wrapper(x, value=0):
+    return paddle._legacy_C_ops.fill_any(x, "value", value)
 
 
 class TestFillAnyOp(OpTest):
@@ -34,10 +32,7 @@ class TestFillAnyOp(OpTest):
         self.value = 0.0
         self.init()
         self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
-        self.attrs = {
-            'value_float': float(self.value),
-            'value_int': int(self.value),
-        }
+        self.attrs = {'value': float(self.value)}
         self.outputs = {
             'Out': self.value
             * np.ones_like(self.inputs["X"]).astype(self.dtype)
......
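The wrapper and attribute changes above track the new op surface: a single scalar attribute `value` replaces the `value_float` / `value_int` pair. A condensed sketch of how the pieces fit together in an OpTest case (the class name is illustrative; the `python_api` hookup follows eager_op_test conventions):

import numpy as np
from eager_op_test import OpTest

import paddle


def fill_any_wrapper(x, value=0):
    # Identical to the updated wrapper in the diff above.
    return paddle._legacy_C_ops.fill_any(x, "value", value)


class TestFillAnyValue(OpTest):
    def setUp(self):
        self.op_type = "fill_any"
        self.python_api = fill_any_wrapper
        x = np.random.random((20, 30)).astype("float64")
        self.inputs = {'X': x}
        self.attrs = {'value': 1.0}  # single Scalar attr
        self.outputs = {'Out': np.full_like(x, 1.0)}

    def test_check_output(self):
        self.check_output()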
@@ -39,10 +39,7 @@ class XPUTestFillAnyOp(XPUOpTestWrapper):
             self.value = 0.0
             self.init()
             self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
-            self.attrs = {
-                'value_float': float(self.value),
-                'value_int': int(self.value),
-            }
+            self.attrs = {'value': float(self.value)}
             self.outputs = {
                 'Out': self.value
                 * np.ones_like(self.inputs["X"]).astype(self.dtype)
......