diff --git a/paddle/fluid/operators/cum_op.cc b/paddle/fluid/operators/cum_op.cc
deleted file mode 100644
index a886e0dbbe99b5ebff74f7dcea066e4956bdb07e..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/cum_op.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/op_version_registry.h"
-#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
-#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
-#include "paddle/fluid/prim/utils/static/desc_tensor.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class CumOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    auto input_data_type =
-        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return phi::KernelKey(input_data_type, ctx.GetPlace());
-  }
-};
-
-class CumGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "cumsum");
-    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input",
-                   "Out@GRAD",
-                   "cumsum");
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
-  }
-
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    auto input_data_type =
-        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return phi::KernelKey(input_data_type, ctx.GetPlace());
-  }
-};
-
-class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "Input of cumsum operator");
-    AddOutput("Out", "Output of cumsum operator");
-    AddAttr<int>("axis",
-                 "The dimension to accumulate along. -1 means the last "
-                 "dimension [default -1].")
-        .SetDefault(-1)
-        .SupportTensor();
-    AddAttr<bool>("flatten",
-                  "Whether to compute the cumsum over the flattened array. "
-                  "[default false].")
-        .SetDefault(false);
-    AddAttr<bool>("exclusive",
-                  "Whether to perform exclusive cumsum. [default false].")
-        .SetDefault(false);
-    AddAttr<bool>("reverse",
-                  "If true, the cumsum is performed in the reversed direction. "
-                  "[default false].")
-        .SetDefault(false);
-    AddComment(R"DOC(
-The cumulative sum of the elements along a given axis.
-By default, the first element of the result is the same of the first element of
-the input. If exclusive is true, the first element of the result is 0.
-)DOC");
-  }
-};
-
-template <typename T>
-class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> grad_op) const override {
-    grad_op->SetType("cumsum_grad");
-    grad_op->SetInput("X", this->Input("X"));
-    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    grad_op->SetAttrMap(this->Attrs());
-    grad_op->SetAttr("reverse",
-                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
-  }
-};
-
-class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
-
- public:
-  void Apply() override {
-    paddle::Tensor x = this->GetSingleForwardInput("X");
-    paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
-    paddle::Tensor dx = this->GetSingleInputGrad("X");
-    auto* dx_ptr = this->GetOutputPtr(&dx);
-    std::string dx_name = this->GetOutputName(dx);
-    int axis = static_cast<int>(this->Attr<int>("axis"));
-    bool flatten = static_cast<bool>(this->Attr<bool>("flatten"));
-    bool exclusive = static_cast<bool>(this->Attr<bool>("exclusive"));
-    bool reverse = static_cast<bool>(this->Attr<bool>("reverse"));
-    VLOG(6) << "Runing cumsum composite func";
-    prim::cumsum_grad<prim::DescTensor>(
-        x, out_grad, axis, flatten, exclusive, reverse, dx_ptr);
-    this->RecoverOutputName(dx, dx_name);
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-using CPU = phi::CPUContext;
-DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
-                            CumsumInferShapeFunctor,
-                            PD_INFER_META(phi::CumScalarAxisInferMeta));
-
-REGISTER_OPERATOR(cumsum,
-                  ops::CumOp,
-                  ops::CumsumOpMaker,
-                  ops::CumsumCompositeGradOpMaker,
-                  ops::CumsumGradMaker<paddle::framework::OpDesc>,
-                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
-                  CumsumInferShapeFunctor);
-REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);
-
-REGISTER_OP_VERSION(cumsum).AddCheckpoint(
-    R"ROC(
-      Upgrade cumsum add a new attribute [flatten].
-    )ROC",
-    paddle::framework::compatible::OpVersionDesc().NewAttr(
-        "flatten",
-        "In order to compute the cumsum over the flattened array when the "
-        "argument `axis` in python API is None.",
-        false));
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index f52dd4fd9f2b689ae1d99d712abcf235f3515c43..15e91ac951883e9832cb201b34b0a817f52c85aa 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -493,6 +493,18 @@
   kernel :
     func : cumprod_grad
 
+- backward_op : cumsum_grad
+  forward : cumsum(Tensor x, Scalar axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : cumsum_grad
+    data_type: x
+  composite: cumsum_grad(x, out_grad, axis, flatten, exclusive, reverse, x_grad)
+
 - backward_op : depthwise_conv2d_double_grad
   forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_input), Tensor(grad_filter)
   args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index d2c435c8d9f1d4675a9a7e7c0a6f6c2d81177475..57ea49d1c5c092865fac338be090df64a370712f 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -159,18 +159,6 @@
     data_type : x
   backward : conv2d_transpose_double_grad
 
-- backward_op : cumsum_grad
-  forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  kernel :
-    func : cumsum_grad
-    data_type: x
-  composite: cumsum_grad(x, out_grad, axis, flatten, exclusive, reverse, x_grad)
-
 - backward_op : deformable_conv_grad
   forward : deformable_conv(Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) -> Tensor(out)
   args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 8052e0cd47d6d5175c4f05be9872dfa3c4812d2c..4c46d86b065f6c4ec6d4bd67da30d507de1a0f9a 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -166,15 +166,6 @@
   output : Tensor(out)
   invoke : copy_to_impl(x, place, blocking)
 
-- op : cumsum
-  args : (Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(out)
-  infer_meta :
-    func : CumScalarAxisInferMeta
-  kernel :
-    func : cumsum
-  backward : cumsum_grad
-
 - op : decode_jpeg
   args : (Tensor x, str mode, Place place)
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index ea243108c3078f57b7991aa9a227c066a062cd9b..927cb03e159c3390b578125a53bf5a65376882fb 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -663,7 +663,7 @@
   scalar:
     axis:
       data_type : int
-      tensor_name: AxisTensor
+      support_tensor : true
 
 - op : data_norm
   backward : data_norm_grad
diff --git a/paddle/phi/api/yaml/op_version.yaml b/paddle/phi/api/yaml/op_version.yaml
index 48e13add28e35f31324591574ae595e5665d7eb4..db97ab126d7205480a3f56589aa661e2e00b8ea0 100644
--- a/paddle/phi/api/yaml/op_version.yaml
+++ b/paddle/phi/api/yaml/op_version.yaml
@@ -140,6 +140,14 @@
           comment : In order to add additional size to one side of each dimension in the output.
           default : "std::vector<int>{}"
 
+- op : cumsum
+  version :
+    - checkpoint : Upgrade cumsum add a new attribute [flatten].
+      action :
+        - add_attr : flatten
+          comment : In order to compute the cumsum over the flattened array when the argument `axis` in python API is None.
+          default : "false"
+
 - op : depthwise_conv2d
   version :
     - checkpoint : Upgrade depthwise_conv2d, add a new attribute [use_addto].
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index 5817122a8cb08430960a92734bb7120903b0cc91..e09f2168d6d7ef8c39bc6ae24e100b39850a4e0d 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -588,6 +588,16 @@
     func : cumprod
   backward : cumprod_grad
 
+- op : cumsum
+  args : (Tensor x, Scalar axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
+  output : Tensor(out)
+  infer_meta :
+    func : CumScalarAxisInferMeta
+  kernel :
+    func : cumsum
+    data_type : x
+  backward : cumsum_grad
+
 - op : depthwise_conv2d
   args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/cumsum_sig.cc b/paddle/phi/ops/compat/cumsum_sig.cc
deleted file mode 100644
index c8fbcdec9026b6deb4445206fc8b14ff36490e9c..0000000000000000000000000000000000000000
--- a/paddle/phi/ops/compat/cumsum_sig.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature CumsumOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("cumsum_grad",
-                         {"X", "Out@GRAD"},
-                         {"axis", "flatten", "exclusive", "reverse"},
-                         {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(cumsum_grad, phi::CumsumOpArgumentMapping);
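
Note: the behavior that the new `ops.yaml` / `backward.yaml` entries describe can be
sanity-checked through the public `paddle.cumsum` Python API, whose `axis=None` path is
what the `flatten` attribute records. A minimal sketch (the expected outputs in the
comments follow from the op semantics, not from anything in this diff):

    import paddle

    x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
    paddle.cumsum(x)           # axis=None flattens first: [1, 3, 6, 10, 15, 21]
    paddle.cumsum(x, axis=-1)  # per row: [[1, 3, 6], [4, 9, 15]]

The composite backward registered above, cumsum_grad(x, out_grad, axis, flatten,
exclusive, reverse, x_grad), amounts to a cumsum of out_grad with the `reverse` flag
flipped, which the autograd result reflects:

    x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
    y = paddle.cumsum(x)
    (dx,) = paddle.grad(y.sum(), x)
    # dx == [3.0, 2.0, 1.0], i.e. the reversed cumsum of a tensor of ones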