diff --git a/paddle/fluid/operators/fill_any_op.cc b/paddle/fluid/operators/fill_any_op.cc
deleted file mode 100644
index 4e6929b44503870fac3f10474984d3e0c92b9647..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/fill_any_op.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/kernels/funcs/math_function.h"
-
-namespace paddle {
-namespace operators {
-
-class FillAnyOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "(Tensor) The input tensor.");
-    AddOutput("Out", "Tensor, the tensor filled with input value ");
-    AddAttr<float>("value_float", "The float var to fill in Tensor")
-        .SetDefault(0);
-    AddAttr<int>("value_int", "The int var to fill in Tensor").SetDefault(0);
-    AddComment(R"DOC(Fill operator with backward;
-                Fill an tensor with `value`.
-                )DOC");
-  };
-};
-
-class FillAnyOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-};
-
-class FillAnyGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-};
-
-template <typename T>
-class FillAnyGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> retv) const override {
-    retv->SetType(this->ForwardOpType() + "_grad");
-    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    retv->SetAttrMap(this->Attrs());
-  }
-};
-
-DECLARE_INPLACE_OP_INFERER(FillAnyOpInplaceInferer, {"X", "Out"});
-DECLARE_INPLACE_OP_INFERER(FillAnyGradInplaceInferer,
-                           {framework::GradVarName("Out"),
-                            framework::GradVarName("X")});
-}  // namespace operators
-}  // namespace paddle
-namespace ops = paddle::operators;
-
-DECLARE_INFER_SHAPE_FUNCTOR(fill_any,
-                            FillAnyInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMeta));
-DECLARE_INFER_SHAPE_FUNCTOR(fill_any_grad,
-                            FillAnyGradInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMeta));
-
-REGISTER_OPERATOR(fill_any,
-                  ops::FillAnyOp,
-                  ops::FillAnyOpMaker,
-                  ops::FillAnyGradOpMaker<paddle::framework::OpDesc>,
-                  ops::FillAnyGradOpMaker<paddle::imperative::OpBase>,
-                  ops::FillAnyOpInplaceInferer,
-                  FillAnyInferShapeFunctor);
-
-REGISTER_OPERATOR(fill_any_grad,
-                  ops::FillAnyGradOp,
-                  ops::FillAnyGradInplaceInferer,
-                  FillAnyGradInferShapeFunctor);
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index 95fad7ca3d4e0885e4a9fb562ade518659d7aafd..43dc112ca79b09a5456e2f82ffa9d6b50ff2d18e 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -744,6 +744,17 @@
     func : fill_diagonal_tensor_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : fill_grad
+  forward : fill (Tensor x, Scalar value=0) -> Tensor(out)
+  args : (Tensor out_grad, Scalar value)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : fill_grad
+  inplace : (out_grad -> x_grad)
+
 - backward_op : flash_attn_grad
   forward : flash_attn (Tensor q, Tensor k, Tensor v, Tensor fixed_seed_offset, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") -> Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset)
   args : (Tensor q, Tensor k, Tensor v, Tensor out, Tensor softmax_lse, Tensor seed_offset, Tensor out_grad, float dropout = 0.0, bool causal = false)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index e4b4baca00c51570e35815a4721352226a4f6bf6..307ead7b599c811e2ad8e8d150207b3884b25952 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -282,17 +282,6 @@
     func : UnchangedInferMeta
   invoke : zeros_like(out_grad)
 
-- backward_op : fill_grad
-  forward : fill (Tensor x, Scalar value) -> Tensor(out)
-  args : (Tensor out_grad, Scalar value)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [out_grad]
-  kernel :
-    func : fill_grad
-  inplace : (out_grad -> x_grad)
-
 - backward_op : fmin_grad
   forward : fmin(Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 559370953b2f42304c67885dd8fdda2307751123..b817946691d2e5a80ed46a1b7d0d84a89cdd013f 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -342,17 +342,6 @@
     data_type : dtype
     backend : place
 
-- op : fill
-  args : (Tensor x, Scalar value)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : fill
-  inplace : (x -> out)
-  backward: fill_grad
-
 - op : floor_divide
   args : (Tensor x, Tensor y)
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index d459cbc9a39218328ac63345dbae029396e050a9..75ad141814917168f473e93b5651279022620ffc 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -981,6 +981,17 @@
   inputs: {x: X}
   outputs: {out: Out}
 
+- op : fill (fill_any)
+  backward : fill_grad (fill_any_grad)
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  scalar :
+    value :
+      data_type : float
+      support_tensor : true
+
 - op : fill_diagonal
   backward : fill_diagonal_grad
   inputs :
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index a6dbdad1d3590aafb5ba9f987499100212af6ba6..130cd3785a16ffeb0aec2bee7039f8ed657b9ebd 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -801,6 +801,17 @@
     func : fft_r2c
   backward : fft_r2c_grad
 
+- op : fill
+  args : (Tensor x, Scalar value=0)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : fill
+  inplace : (x -> out)
+  backward: fill_grad
+
 - op : fill_diagonal
   args : (Tensor x, float value=0, int offset=0, bool wrap=false)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/fill_sig.cc b/paddle/phi/ops/compat/fill_sig.cc
deleted file mode 100644
index b8dff0faa4487ace47a0806c5cb726b7ecaf8bf1..0000000000000000000000000000000000000000
--- a/paddle/phi/ops/compat/fill_sig.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-KernelSignature FillOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("fill", {"X"}, {"value_float"}, {"Out"});
-}
-
-KernelSignature FillGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "fill_grad", {"Out@GRAD"}, {"value_float"}, {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_BASE_KERNEL_NAME(fill_any, fill);
-PD_REGISTER_BASE_KERNEL_NAME(fill_any_grad, fill_grad);
-
-PD_REGISTER_ARG_MAPPING_FN(fill_any, phi::FillOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(fill_any_grad, phi::FillGradOpArgumentMapping);
diff --git a/test/legacy_test/test_fill_any_op.py b/test/legacy_test/test_fill_any_op.py
index 0aad1b01be40cf4b141a74d14adc92b88376830f..2137cb1e22ea462bcf6ded4f81badd8f0d0812a9 100644
--- a/test/legacy_test/test_fill_any_op.py
+++ b/test/legacy_test/test_fill_any_op.py
@@ -20,10 +20,8 @@ from eager_op_test import OpTest
 import paddle
 
 
-def fill_any_wrapper(x, value_float=0, value_int=0):
-    return paddle._legacy_C_ops.fill_any(
-        x, "value_float", value_float, "value_int", value_int
-    )
+def fill_any_wrapper(x, value=0):
+    return paddle._legacy_C_ops.fill_any(x, "value", value)
 
 
 class TestFillAnyOp(OpTest):
@@ -34,10 +32,7 @@ class TestFillAnyOp(OpTest):
         self.value = 0.0
         self.init()
         self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
-        self.attrs = {
-            'value_float': float(self.value),
-            'value_int': int(self.value),
-        }
+        self.attrs = {'value': float(self.value)}
         self.outputs = {
             'Out': self.value
             * np.ones_like(self.inputs["X"]).astype(self.dtype)
diff --git a/test/xpu/test_fill_any_op_xpu.py b/test/xpu/test_fill_any_op_xpu.py
index e351d9dacd1a35497dcd93a6416c204e25e2a9ad..824accc91297aa978d7885ac76c9f8b129f38878 100644
--- a/test/xpu/test_fill_any_op_xpu.py
+++ b/test/xpu/test_fill_any_op_xpu.py
@@ -39,10 +39,7 @@ class XPUTestFillAnyOp(XPUOpTestWrapper):
             self.value = 0.0
             self.init()
            self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
-            self.attrs = {
-                'value_float': float(self.value),
-                'value_int': int(self.value),
-            }
+            self.attrs = {'value': float(self.value)}
             self.outputs = {
                 'Out': self.value
                 * np.ones_like(self.inputs["X"]).astype(self.dtype)
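For reference, a minimal sketch of how the consolidated `value` scalar attribute is exercised after this change, mirroring the updated `fill_any_wrapper` from `test/legacy_test/test_fill_any_op.py` above. The tensor shape, fill value, and the closing assertion are illustrative assumptions, not part of the patch; the legacy C-ops call is used exactly as the test wrapper uses it.

```python
import numpy as np
import paddle


def fill_any_wrapper(x, value=0):
    # A single scalar attribute "value" replaces the old
    # "value_float"/"value_int" attribute pair.
    return paddle._legacy_C_ops.fill_any(x, "value", value)


# Illustrative usage: fill a float32 tensor with 3.0 and check the result.
x = paddle.to_tensor(np.zeros((20, 30), dtype='float32'))
out = fill_any_wrapper(x, value=3.0)
np.testing.assert_allclose(
    out.numpy(), np.full((20, 30), 3.0, dtype='float32')
)
```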