From 31f3f643668ac98afe76bcf9e95b752c4b872c29 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Fri, 11 Nov 2022 13:26:46 +0800
Subject: [PATCH] Generate static graph code for some ops by yaml (part3) (#47803)

* generate static graph code for some ops by yaml

* remove deleted files

* update cmake

* update cmake

* update cmake
---
 paddle/fluid/framework/ir/CMakeLists.txt    |   4 +-
 paddle/fluid/operators/CMakeLists.txt       |   4 -
 .../operators/fill_diagonal_tensor_op.cc    | 127 ----------
 paddle/fluid/operators/fold_op.cc           | 130 -----------
 paddle/fluid/operators/gather_tree_op.cc    |  71 ------
 paddle/fluid/operators/gelu_op.cc           | 127 ----------
 paddle/fluid/operators/gumbel_softmax_op.cc | 108 ---------
 paddle/fluid/operators/unfold_op.cc         | 136 -----------
 paddle/phi/api/yaml/backward.yaml           |  53 +++++
 paddle/phi/api/yaml/legacy_backward.yaml    |  52 -----
 paddle/phi/api/yaml/legacy_ops.yaml         | 217 +++++++-----------
 paddle/phi/api/yaml/op_compat.yaml          |  34 +++
 paddle/phi/api/yaml/ops.yaml                |  56 +++++
 .../ops/compat/fill_diagonal_tensor_sig.cc  |  38 ---
 paddle/phi/ops/compat/fold_sig.cc           |  26 ---
 paddle/phi/ops/compat/gelu_sig.cc           |  31 ---
 paddle/phi/ops/compat/gumbel_softmax_sig.cc |  35 ---
 paddle/phi/ops/compat/unfold_sig.cc         |  28 ---
 18 files changed, 226 insertions(+), 1051 deletions(-)
 delete mode 100644 paddle/fluid/operators/fill_diagonal_tensor_op.cc
 delete mode 100644 paddle/fluid/operators/fold_op.cc
 delete mode 100644 paddle/fluid/operators/gather_tree_op.cc
 delete mode 100644 paddle/fluid/operators/gelu_op.cc
 delete mode 100644 paddle/fluid/operators/gumbel_softmax_op.cc
 delete mode 100644 paddle/fluid/operators/unfold_op.cc
 delete mode 100644 paddle/phi/ops/compat/fill_diagonal_tensor_sig.cc
 delete mode 100644 paddle/phi/ops/compat/fold_sig.cc
 delete mode 100644 paddle/phi/ops/compat/gelu_sig.cc
 delete mode 100644 paddle/phi/ops/compat/gumbel_softmax_sig.cc
 delete mode 100644 paddle/phi/ops/compat/unfold_sig.cc

diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index bafdd26cf5..f9359d0b58 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -153,7 +153,7 @@ if(WITH_MKLDNN)
       mkldnn_placement_pass
       op_registry
       elementwise_add_op
-      gelu_op
+      generated_op
      activation_op
       softmax_op
       softmax
@@ -418,7 +418,7 @@ if(WITH_MKLDNN)
       im2col
       vol2col
       batch_norm_op
-      gelu_op
+      generated_op
       activation_op
       elementwise_add_op
       concat_and_split
diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 4024c7d4a2..5293604160 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -248,10 +248,6 @@ if(WITH_UNITY_BUILD)
   target_link_libraries(paddle_operators_unity ${OP_HEADER_DEPS} ${COMMON_OP_DEPS})
 endif()
 
-if(WITH_ASCEND_CL)
-cc_test(gelu_op_npu_test SRCS gelu_op_npu_test.cc DEPS op_registry gelu_op scope device_context enforce executor)
-endif()
-
 if (WITH_GPU OR WITH_ASCEND_CL)
 cc_test(copy_cross_scope_test SRCS copy_cross_scope_test.cc DEPS op_registry copy_cross_scope_op scope device_context enforce executor)
 endif()
diff --git a/paddle/fluid/operators/fill_diagonal_tensor_op.cc b/paddle/fluid/operators/fill_diagonal_tensor_op.cc
deleted file mode 100644
index 5a7f56cbfd..0000000000
--- a/paddle/fluid/operators/fill_diagonal_tensor_op.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/infermeta/binary.h"
-
-namespace paddle {
-namespace operators {
-
-class FillDiagonalTensorOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddComment(R"DOC(Fill replace operator
-                Fill the diagonal of an tensor with `Y` Tensor.
-                )DOC");
-    AddInput("X", "(Tensor) The input tensor.");
-    AddInput("Y", "(Tensor) The input tensor to fill in.");
-    AddOutput("Out",
-              "Tensor, the output tensor, with the same shape and data type "
-              "as input(x)");
-    AddAttr<int>("dim1", "the first dim to figure out the diagonal")
-        .SetDefault(0);
-    AddAttr<int>("dim2", "the second dim to figure out the diagonal")
-        .SetDefault(1);
-    AddAttr<int64_t>("offset",
-                     "offset of diagonal, zero means no offset, positive means "
-                     "offset to up-right corner; negtive means offset to "
-                     "bottom-left corner")
-        .SetDefault(0);
-  }
-};
-
-class FillDiagonalTensorOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
-  }
-};
-
-class FillDiagonalTensorOpVarTypeInference
-    : public framework::VarTypeInference {
- public:
-  void operator()(framework::InferVarTypeContext *ctx) const override {
-    auto var_type = ctx->GetInputType("X", 0);
-    auto data_type = ctx->GetInputDataType("X", 0);
-    ctx->SetOutputType("Out", var_type, framework::ALL_ELEMENTS);
-    ctx->SetOutputDataType("Out", data_type, framework::ALL_ELEMENTS);
-  }
-};
-
-class FillDiagonalTensorGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    // Note: don't get data type from ctx.Input<framework::Tensor>("Input");
-    auto dtype =
-        ctx.Input<framework::Tensor>(framework::GradVarName("Out"))->type();
-    return framework::OpKernelType(framework::TransToProtoVarType(dtype),
-                                   ctx.GetPlace());
-  }
-};
-
-template <typename T>
-class FillDiagonalTensorGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> retv) const override {
-    retv->SetType("fill_diagonal_tensor_grad");
-    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    retv->SetAttrMap(this->Attrs());
-  }
-};
-
-DECLARE_INPLACE_OP_INFERER(FillDiagonalTensorOpInplaceInferer, {"X", "Out"});
-DECLARE_INPLACE_OP_INFERER(FillDiagonalTensorGradOpInplaceInferer,
-                           {framework::GradVarName("Out"),
-                            framework::GradVarName("X")});
-
-}  // namespace operators
-}  // namespace paddle
-namespace ops = paddle::operators;
-
-DECLARE_INFER_SHAPE_FUNCTOR(fill_diagonal_tensor,
-                            FillDiagonalTensorInferShapeFunctor,
-                            PD_INFER_META(phi::FillDiagonalTensorInferMeta));
-DECLARE_INFER_SHAPE_FUNCTOR(
-    fill_diagonal_tensor_grad,
-    FillDiagonalTensorGradInferShapeFunctor,
-    PD_INFER_META(phi::FillDiagonalTensorGradInferMeta));
-
-REGISTER_OPERATOR(
-    fill_diagonal_tensor,
-    ops::FillDiagonalTensorOp,
-    ops::FillDiagonalTensorGradOpMaker<paddle::framework::OpDesc>,
-    ops::FillDiagonalTensorGradOpMaker<paddle::imperative::OpBase>,
-    ops::FillDiagonalTensorOpMaker,
-    ops::FillDiagonalTensorOpInplaceInferer,
-    ops::FillDiagonalTensorOpVarTypeInference,
-    FillDiagonalTensorInferShapeFunctor);
-
-REGISTER_OPERATOR(fill_diagonal_tensor_grad,
-                  ops::FillDiagonalTensorGradOp,
-                  ops::FillDiagonalTensorGradOpInplaceInferer,
-                  FillDiagonalTensorGradInferShapeFunctor);
diff --git a/paddle/fluid/operators/fold_op.cc b/paddle/fluid/operators/fold_op.cc
deleted file mode 100644
index 1c4127b6fb..0000000000
--- a/paddle/fluid/operators/fold_op.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License. */
-
-#include <memory>
-#include <vector>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class FoldOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
-        ctx.device_context());
-  }
-};
-
-class FoldOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "Tensor, "
-             "the input of fold op. "
-             "The format of X is [N, C_in, L], "
-             "where N is the batch size, C_in is the input channels, "
-             "L is the length");
-    AddOutput("Y",
-              "Tensor, "
-              "the output of unfold op. "
-              "The format of Y is [N, C_out, output_height, output_width], "
-              "where N is the batch size, "
-              "C_in is the output channels of Y, output_height and "
-              "output_width "
-              "is the calculated height and width of output feature map.");
-    AddAttr<std::vector<int>>(
-        "output_sizes",
-        "vector<int>, the output sizes of the convolution operator.");
-    AddAttr<std::vector<int>>(
-        "kernel_sizes",
-        "vector<int>, the kernel sizes of the convolution operator.");
-    AddAttr<std::vector<int>>(
-        "strides", "vector<int>, the strides of the convolution operator.");
-    AddAttr<std::vector<int>>(
-        "paddings",
-        "vector<int>, the paddings applied to pad the feature map.");
-    AddAttr<std::vector<int>>(
-        "dilations", "vector<int>, the dilations of the convolution operator.");
-    AddComment(R"DOC(
-**Fold Operator**
-
-This Operator is used to combines an array of sliding local blocks into a large containing
-tensor. also known as col2im when operated on batched 2D image tensor. Fold calculates each
-combined value in the resulting large tensor by summing all values from all containing blocks.
-Unfold extracts the values in the local blocks by copying from the large tensor. So, if the
-blocks overlap, they are not inverses of each other.
-    )DOC");
-  }
-};
-
-class FoldGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
-                                       ctx, framework::GradVarName("Y")),
-                                   ctx.device_context());
-  }
-};
-
-template <typename T>
-class FoldGradMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("fold_grad");
-    op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
-    op->SetInput("X", this->Input("X"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-DECLARE_NO_NEED_BUFFER_VARS_INFERER(FoldGradOpNoNeedBufferVarsInferer, "X");
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-DECLARE_INFER_SHAPE_FUNCTOR(fold,
-                            FoldInferShapeFunctor,
-                            PD_INFER_META(phi::FoldInferMeta));
-REGISTER_OPERATOR(fold,
-                  ops::FoldOp,
-                  ops::FoldOpMaker,
-                  ops::FoldGradMaker<paddle::framework::OpDesc>,
-                  ops::FoldGradMaker<paddle::imperative::OpBase>,
-                  FoldInferShapeFunctor);
-DECLARE_INFER_SHAPE_FUNCTOR(fold_grad,
-                            FoldGradInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMeta));
-REGISTER_OPERATOR(fold_grad,
-                  ops::FoldGradOp,
-                  ops::FoldGradOpNoNeedBufferVarsInferer,
-                  FoldGradInferShapeFunctor);
diff --git a/paddle/fluid/operators/gather_tree_op.cc b/paddle/fluid/operators/gather_tree_op.cc
deleted file mode 100644
index 50de5835b3..0000000000
--- a/paddle/fluid/operators/gather_tree_op.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/binary.h"
-
-namespace paddle {
-namespace operators {
-
-class GatherTreeOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "Ids"),
-        ctx.device_context());
-  }
-};
-
-class GatherTreeOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("Ids",
-             "The Tensor with shape [length, batch_size, beam_size] containing "
-             "the selected ids of all time steps.");
-    AddInput("Parents",
-             "The Tensor has the same shape as Ids and contains the parents "
-             "corresponding to selected ids when searching among beams.");
-    AddOutput(
-        "Out",
-        "A Tensor with shape [length, batch_size, beam_size] containing the "
-        "full sequences. The sequences is collected by backtracing from the "
-        "last time step of Ids.");
-    AddComment(R"DOC(
-GatherTree Operator.
-
-Backtrace from the last time step and generate the full sequences by collecting beam search
-selected ids.
-
-)DOC");
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-DECLARE_INFER_SHAPE_FUNCTOR(gather_tree,
-                            GatherTreeInferShapeFunctor,
-                            PD_INFER_META(phi::GatherTreeMeta));
-
-REGISTER_OPERATOR(gather_tree,
-                  ops::GatherTreeOp,
-                  ops::GatherTreeOpMaker,
-                  GatherTreeInferShapeFunctor);
diff --git a/paddle/fluid/operators/gelu_op.cc b/paddle/fluid/operators/gelu_op.cc
deleted file mode 100644
index eb3c557116..0000000000
--- a/paddle/fluid/operators/gelu_op.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <memory>
-#include <string>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/operator.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class GeluOp : public framework::OperatorWithKernel {
- public:
-  GeluOp(const std::string &type,
-         const framework::VariableNameMap &inputs,
-         const framework::VariableNameMap &outputs,
-         const framework::AttributeMap &attrs)
-      : OperatorWithKernel(type, inputs, outputs, attrs) {}
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return framework::OpKernelType(data_type, ctx.GetPlace());
-  }
-};
-
-class GeluGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")),
-        true,
-        platform::errors::InvalidArgument(
-            "Input(%s) of GeluGradOp should not be null.", "DOut"));
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"),
-                      true,
-                      platform::errors::InvalidArgument(
-                          "Input(%s) of GeluGradOp should not be null.", "X"));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput(framework::GradVarName("X")),
-        true,
-        platform::errors::InvalidArgument(
-            "Output(%s) of GeluGradOp should not be null.", "DX"));
-    auto x_grad_name = framework::GradVarName("X");
-    ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ x_grad_name);
-  }
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return framework::OpKernelType(data_type, ctx.GetPlace());
-  }
-};
-
-class GeluOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "Input of Gelu operator");
-    AddOutput("Out", "Output of Gelu operator");
-    AddAttr<bool>("approximate",
-                  "(bool, default false) use approximation of gelu")
-        .SetDefault(false);
-    AddComment(R"DOC(
-Gelu Activation Operator.
-
-For more details, please refer to [Gaussian Error Linear Units](https://arxiv.org/pdf/1606.08415.pdf).
-
-when using approximation
-$out = \\frac{1}{2}x(1+tanh(\\sqrt{\\frac{2}{\\pi}}(x+0.044715x^{3}))$
-
-or else
-$out = \\frac{1 + erf(\\frac{x}{\\sqrt{2}})}{2} x$
-
-)DOC");
-  }
-};
-
-template <typename T>
-class GeluGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> grad_op) const override {
-    grad_op->SetType("gelu_grad");
-    grad_op->SetInput("X", this->Input("X"));
-    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    grad_op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-
-DECLARE_INFER_SHAPE_FUNCTOR(gelu,
-                            GeluInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMeta));
-REGISTER_OPERATOR(gelu,
-                  ops::GeluOp,
-                  ops::GeluOpMaker,
-                  ops::GeluGradOpMaker<paddle::framework::OpDesc>,
-                  ops::GeluGradOpMaker<paddle::imperative::OpBase>,
-                  GeluInferShapeFunctor);
-REGISTER_OPERATOR(gelu_grad, ops::GeluGradOp);
diff --git a/paddle/fluid/operators/gumbel_softmax_op.cc b/paddle/fluid/operators/gumbel_softmax_op.cc
deleted file mode 100644
index 345a8bdd07..0000000000
--- a/paddle/fluid/operators/gumbel_softmax_op.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-class GumbelSoftmaxOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
-        ctx.device_context());
-  }
-};
-
-class GumbelSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "(Tensor) An N-D Tensor, N >= 1,"
-             "The first N - 1 dimensions index into a batch of independent "
-             "distributions "
-             "and the last dimension represents a vector of probabilities for "
-             "each class.");
-    AddOutput("Out", "The sampled tensor with the same shape as X.");
-    AddAttr<float>("temperature",
-                   "(float, default 1.0) non-negative scalar temperature.")
-        .SetDefault(1.0);
-    AddAttr<bool>(
-        "hard",
-        "(bool, default false) "
-        "if True, the returned samples will be discretized as one-hot vectors, "
-        "but will be differentiated as if it is the soft sample in autograd.")
-        .SetDefault(false);
-    AddAttr<int>("axis",
-                 "(int, default -1)"
-                 "The dimension index of Input(x) to perform gumbel_softmax.")
-        .SetDefault(-1);
-    AddComment(R"DOC(
-GumbelSoftmax Operator.
-
-Samples from the Gumbel-Softmax distribution and optionally discretizes.
-
-)DOC");
-  }
-};
-
-class GumbelSoftmaxGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-};
-
-template <typename T>
-class GumbelSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("gumbel_softmax_grad");
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-
-DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax,
-                            GumbelSoftmaxInferShapeFunctor,
-                            PD_INFER_META(phi::GumbelSoftmaxInferMeta));
-DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax_grad,
-                            GumbelSoftmaxGradInferShapeFunctor,
-                            PD_INFER_META(phi::GumbelSoftmaxGradInferMeta));
-
-REGISTER_OPERATOR(gumbel_softmax,
-                  ops::GumbelSoftmaxOp,
-                  ops::GumbelSoftmaxOpMaker,
-                  ops::GumbelSoftmaxGradOpMaker<paddle::framework::OpDesc>,
-                  ops::GumbelSoftmaxGradOpMaker<paddle::imperative::OpBase>,
-                  GumbelSoftmaxInferShapeFunctor);
-REGISTER_OPERATOR(gumbel_softmax_grad,
-                  ops::GumbelSoftmaxGradOp,
-                  GumbelSoftmaxGradInferShapeFunctor);
diff --git a/paddle/fluid/operators/unfold_op.cc b/paddle/fluid/operators/unfold_op.cc
deleted file mode 100644
index b8de9df202..0000000000
--- a/paddle/fluid/operators/unfold_op.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class UnfoldOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "Tensor, "
-             "the input of unfold op. "
-             "The format of X is [N, C_in, H, W], "
-             "where N is the batch size, C_in is the input channels, "
-             "H is the height and W is the width");
-    AddOutput(
-        "Y",
-        "Tensor, "
-        "the output of unfold op. "
" - "The format of Y is [N, C_in*filter_height*filter_width, " - "output_height*output_width], where N is the batch size, " - "C_in is the input channels of X, filter_height and filter_width is " - "height and width of the filtering kernel, output_height and " - "output_width " - "is the calculated height and width of output feature map."); - AddAttr>( - "kernel_sizes", - "vector, the kernel sizes of the convolution operator."); - AddAttr>( - "strides", "vector, the strides of the convolution operator."); - AddAttr>( - "paddings", - "vector, the paddings applied to pad the feature map."); - AddAttr>( - "dilations", "vector, the dilations of the convolution operator."); - AddComment(R"DOC( -**Unfold Operator** - -This Operator is used to extract sliding local blocks from a batched input tensor, also known -as im2col when operated on batched 2D image tensor. For each block under the convolution filter, -all element will be rearranged as a column. While the convolution filter sliding over the input -feature map, a series of such columns will be formed. - )DOC"); - } -}; - -class UnfoldOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - OperatorWithKernel::IndicateVarDataType(ctx, "X"), - ctx.device_context()); - } -}; - -class UnfoldGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ( - ctx->HasInput(framework::GradVarName("Y")), - true, - platform::errors::NotFound("The gradient of Y should not be null")); - PADDLE_ENFORCE_EQ( - ctx->HasInput("X"), - true, - platform::errors::NotFound("The input X should not be null")); - PADDLE_ENFORCE_EQ( - ctx->HasOutput(framework::GradVarName("X")), - true, - platform::errors::NotFound("The gradient of X should not be null")); - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); - } - - protected: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Y")), - ctx.device_context()); - } -}; - -template -class UnfoldGradMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("unfold_grad"); - op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y")); - op->SetInput("X", this->Input("X")); - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - op->SetAttrMap(this->Attrs()); - } -}; - -DECLARE_NO_NEED_BUFFER_VARS_INFERER(UnfoldGradOpNoNeedBufferVarsInferer, "X"); - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -DECLARE_INFER_SHAPE_FUNCTOR(unfold, - UnfoldInferShapeFunctor, - PD_INFER_META(phi::UnfoldInferMeta)); -REGISTER_OPERATOR(unfold, - ops::UnfoldOp, - ops::UnfoldOpMaker, - ops::UnfoldGradMaker, - ops::UnfoldGradMaker, - UnfoldInferShapeFunctor); -REGISTER_OPERATOR(unfold_grad, - ops::UnfoldGradOp, - ops::UnfoldGradOpNoNeedBufferVarsInferer); diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index dc3a76dd09..82c2b12b17 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ 
b/paddle/phi/api/yaml/backward.yaml @@ -390,6 +390,16 @@ data_type: out_grad no_need_buffer: x +- backward_op : fill_diagonal_tensor_grad + forward : fill_diagonal_tensor (Tensor x, Tensor y, int64_t offset, int dim1, int dim2) -> Tensor(out) + args : (Tensor out_grad, int64_t offset, int dim1, int dim2) + output : Tensor(x_grad) + infer_meta : + func : FillDiagonalTensorGradInferMeta + kernel : + func : fill_diagonal_tensor_grad + inplace : (out_grad -> x_grad) + - backward_op : flip_grad forward : flip (Tensor x, int[] axis) -> Tensor(out) args : (Tensor out_grad, int[] axis) @@ -407,6 +417,25 @@ func : floor_grad inplace : (out_grad -> x_grad) +- backward_op : gelu_grad + forward : gelu(Tensor x, bool approximate) -> Tensor(out) + args : (Tensor x, Tensor out_grad, bool approximate) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [x] + kernel : + func : gelu_grad + +- backward_op : gumbel_softmax_grad + forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out) + args : (Tensor out, Tensor out_grad, int axis) + output : Tensor(x_grad) + infer_meta : + func : GumbelSoftmaxGradInferMeta + kernel : + func : gumbel_softmax_grad + - backward_op : hardshrink_grad forward : hardshrink (Tensor x, float threshold) -> Tensor(out) args : (Tensor x, Tensor out_grad, float threshold) @@ -909,3 +938,27 @@ param : [out_grad] kernel : func : trunc_grad + +- backward_op : unfold_grad + forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out) + args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : unfold_grad + data_type : out_grad + no_need_buffer : x + +- backward_op: fold_grad + forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out) + args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) + output: Tensor(x_grad) + infer_meta: + func: UnchangedInferMeta + param : [x] + kernel: + func: fold_grad + data_type : out_grad + no_need_buffer : x diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 4b9920f2dc..855ea1b48b 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -541,16 +541,6 @@ kernel : func : fill_diagonal_grad -- backward_op : fill_diagonal_tensor_grad - forward : fill_diagonal_tensor (Tensor x, Tensor y, int64_t offset, int dim1, int dim2) -> Tensor(out) - args : (Tensor out_grad, int64_t offset, int dim1, int dim2) - output : Tensor(x_grad) - infer_meta : - func : FillDiagonalTensorGradInferMeta - kernel : - func : fill_diagonal_tensor_grad - inplace : (out_grad -> x_grad) - - backward_op : fill_grad forward : fill (Tensor x, Scalar value) -> Tensor(out) args : (Tensor out_grad, Scalar value) @@ -639,16 +629,6 @@ func : gather_nd_grad no_need_buffer : x -- backward_op : gelu_grad - forward : gelu(Tensor x, bool approximate) -> Tensor(out) - args : (Tensor x, Tensor out_grad, bool approximate) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [x] - kernel : - func : gelu_grad - - backward_op : grid_sample_grad forward : grid_sample (Tensor x, Tensor grid, str mode, str padding_mode, bool align_corners) -> Tensor(out) args : (Tensor x, Tensor grid, Tensor out_grad, str 
mode, str padding_mode, bool align_corners) @@ -673,16 +653,6 @@ optional: scale, bias inplace : (y_grad -> x_grad) -- backward_op : gumbel_softmax_grad - forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out) - args : (Tensor out, Tensor out_grad, int axis) - output : Tensor(x_grad) - infer_meta : - func : GumbelSoftmaxGradInferMeta - param : [out, out_grad, axis] - kernel : - func : gumbel_softmax_grad - - backward_op : hardswish_grad forward : hardswish (Tensor x) -> Tensor(out) args : (Tensor x, Tensor out_grad, float threshold = 6.0, float scale = 6.0, float offset = 3.0) @@ -1935,17 +1905,6 @@ output : Tensor(input_grad) invoke : stack(out_grad, axis) -- backward_op : unfold_grad - forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out) - args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : unfold_grad - no_need_buffer : x - - backward_op : uniform_inplace_grad forward : uniform_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out) args : (Tensor out_grad, float min, float max, int seed, int diag_num, int diag_step, float diag_val) @@ -2018,17 +1977,6 @@ func : yolo_loss_grad optional : gt_score -- backward_op: fold_grad - forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out) - args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output: Tensor(x_grad) - infer_meta: - func: UnchangedInferMeta - param : [x] - kernel: - func: fold_grad - no_need_buffer : x - - backward_op: unpool3d_grad forward: unpool3d (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) -> Tensor(out) args: (Tensor x, Tensor indices, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml index 5d54b5c940..03f80b7934 100755 --- a/paddle/phi/api/yaml/legacy_ops.yaml +++ b/paddle/phi/api/yaml/legacy_ops.yaml @@ -287,6 +287,15 @@ optional : bias backward : bilinear_tensor_product_grad +- op : bincount + args: (Tensor x, Tensor weights, Scalar minlength) + output: Tensor(out) + infer_meta: + func: BincountInferMeta + kernel: + func: bincount + optional: weights + - op : bitwise_and args : (Tensor x, Tensor y) output : Tensor(out) @@ -328,6 +337,15 @@ func : box_coder optional : prior_box_var +- op : broadcast_tensors + args: (Tensor[] input) + output: Tensor[]{input.size()} + infer_meta: + func: BroadcastTensorsInferMeta + kernel: + func: broadcast_tensors + backward: broadcast_tensors_grad + - op : cast args : (Tensor x, DataType dtype) output : Tensor @@ -543,6 +561,14 @@ func : depthwise_conv2d_transpose backward : depthwise_conv2d_transpose_grad +- op : dirichlet + args: (Tensor alpha) + output: Tensor(out) + infer_meta: + func: DirichletInferMeta + kernel: + func: dirichlet + - op : distribute_fpn_proposals args : (Tensor fpn_rois, Tensor rois_num, int min_level, int max_level, int refer_level, int refer_scale, bool pixel_offset) output : Tensor[](multi_fpn_rois){max_level - min_level + 1}, Tensor[](multi_level_rois_num){max_level - min_level + 1}, Tensor(restore_index) @@ -719,16 +745,6 @@ 
inplace : (x -> out) backward : fill_diagonal_grad -- op : fill_diagonal_tensor - args : (Tensor x, Tensor y, int64_t offset, int dim1, int dim2) - output : Tensor(out) - infer_meta : - func : FillDiagonalTensorInferMeta - kernel : - func : fill_diagonal_tensor - inplace : (x -> out) - backward : fill_diagonal_tensor_grad - - op : flatten args : (Tensor x, int start_axis, int stop_axis) output : Tensor(out), Tensor(xshape) @@ -859,14 +875,6 @@ data_type : x backward : gather_nd_grad -- op : gather_tree - args : (Tensor ids, Tensor parents) - output : Tensor(out) - infer_meta : - func : GatherTreeMeta - kernel : - func : gather_tree - - op : gaussian args : (IntArray shape, float mean, float std, int seed, DataType dtype, Place place={}) output: Tensor(out) @@ -879,16 +887,6 @@ data_type : dtype backend : place -- op : gelu - args : (Tensor x, bool approximate) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param: [x] - kernel : - func : gelu - backward : gelu_grad - - op : generate_proposals args : (Tensor scores, Tensor bbox_deltas, Tensor im_shape, Tensor anchors, Tensor variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset=true) output : Tensor(rpn_rois), Tensor(rpn_roi_probs), Tensor(rpn_rois_num) @@ -935,15 +933,6 @@ intermediate : mean, variance backward : group_norm_grad -- op : gumbel_softmax - args : (Tensor x, float temperature, bool hard, int axis) - output : Tensor - infer_meta : - func : GumbelSoftmaxInferMeta - kernel : - func : gumbel_softmax - backward : gumbel_softmax_grad - - op : hardswish args : (Tensor x) output : Tensor @@ -1658,6 +1647,15 @@ output : Tensor(out) invoke : full_like(x, 1, dtype, place) +- op : overlap_add + args: (Tensor x, int hop_length, int axis) + output: Tensor + infer_meta: + func: OverlapAddInferMeta + kernel: + func: overlap_add + backward: overlap_add_grad + - op : p_norm args : (Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) output : Tensor(out) @@ -1899,6 +1897,21 @@ optional : mean_grad inplace : (param -> param_out), (moment -> moment_out), (mean_square -> mean_square_out), (mean_grad -> mean_grad_out) +- op : rnn + args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false) + output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve) + infer_meta: + func: RnnInferMeta + param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test] + kernel: + func: rnn + param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test] + data_type: x + backward: rnn_grad + optional : sequence_length + intermediate : reserve + view : (dropout_state_in -> dropout_state_out) + - op : roi_align args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned) output : Tensor @@ -2335,15 +2348,6 @@ func : unbind backward : unbind_grad -- op : unfold - args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output : Tensor - infer_meta : - func : UnfoldInferMeta - kernel : - func : unfold - backward : unfold_grad - - op : uniform args : (IntArray shape, 
DataType dtype, Scalar min, Scalar max, int seed, Place place={}) output : Tensor(out) @@ -2356,6 +2360,17 @@ data_type : dtype backend : place +- op : uniform_inplace + args: (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) + output: Tensor(out) + infer_meta: + func: UniformRandomInplaceInferMeta + kernel: + func: uniform_inplace + data_type: x + inplace: (x -> out) + backward: uniform_inplace_grad + # The `axis` argument of Python API paddle.unique is not vector - op : unique args : (Tensor x, bool return_index, bool return_inverse, bool return_counts, int[] axis, DataType dtype=DataType::INT64) @@ -2375,6 +2390,26 @@ func : unique_consecutive data_type : x +- op : unpool + args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format) + output: Tensor(out) + infer_meta: + func: UnpoolInferMeta + kernel: + func: unpool + data_type: x + backward: unpool_grad + +- op : unpool3d + args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) + output: Tensor(out) + infer_meta: + func: Unpool3dInferMeta + kernel: + func: unpool3d + data_type: x + backward: unpool3d_grad + - op : unsqueeze args : (Tensor x, IntArray axis) output : Tensor(out), Tensor(xshape) @@ -2466,93 +2501,3 @@ args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {}) output : Tensor(out) invoke : full_like(x, 0, dtype, place) - -- op: bincount - args: (Tensor x, Tensor weights, Scalar minlength) - output: Tensor(out) - infer_meta: - func: BincountInferMeta - kernel: - func: bincount - optional: weights - -- op: broadcast_tensors - args: (Tensor[] input) - output: Tensor[]{input.size()} - infer_meta: - func: BroadcastTensorsInferMeta - kernel: - func: broadcast_tensors - backward: broadcast_tensors_grad - -- op: dirichlet - args: (Tensor alpha) - output: Tensor(out) - infer_meta: - func: DirichletInferMeta - kernel: - func: dirichlet - -- op: fold - args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output: Tensor(out) - infer_meta: - func: FoldInferMeta - kernel: - func: fold - backward: fold_grad - -- op: overlap_add - args: (Tensor x, int hop_length, int axis) - output: Tensor - infer_meta: - func: OverlapAddInferMeta - kernel: - func: overlap_add - backward: overlap_add_grad - -- op: rnn - args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false) - output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve) - infer_meta: - func: RnnInferMeta - param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test] - kernel: - func: rnn - param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test] - data_type: x - backward: rnn_grad - optional : sequence_length - intermediate : reserve - view : (dropout_state_in -> dropout_state_out) - -- op: uniform_inplace - args: (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) - output: Tensor(out) - infer_meta: - func: UniformRandomInplaceInferMeta - kernel: - func: uniform_inplace - data_type: x - inplace: (x -> out) - backward: uniform_inplace_grad - -- 
op: unpool - args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format) - output: Tensor(out) - infer_meta: - func: UnpoolInferMeta - kernel: - func: unpool - data_type: x - backward: unpool_grad - -- op: unpool3d - args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) - output: Tensor(out) - infer_meta: - func: Unpool3dInferMeta - kernel: - func: unpool3d - data_type: x - backward: unpool3d_grad diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index eb877f3a7a..0af8731d5a 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -468,6 +468,12 @@ inputs: {x: X} outputs: {out: Out} +- op : fill_diagonal_tensor + inputs : + {x : X, y : Y} + outputs : + out : Out + - op : flip inputs : x : X @@ -500,6 +506,12 @@ attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32", bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f] +- op : fold + inputs : + x : X + outputs : + out : Y + - op : frobenius_norm backward : frobenius_norm_grad extra : @@ -514,8 +526,18 @@ extra : attrs : [bool overwrite = true] +- op : gather_tree + inputs : + {ids : Ids, parents : Parents} + outputs : + out : Out + - op : gelu backward : gelu_grad + inputs : + x : X + outputs : + out : Out extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] @@ -534,6 +556,12 @@ extra : attrs : [bool is_test = false] +- op : gumbel_softmax + inputs : + x : X + outputs : + out : Out + - op : hard_swish backward : hard_swish_grad extra : @@ -1052,6 +1080,12 @@ outputs : out : Out +- op : unfold + inputs : + x : X + outputs : + out : Y + - op : while backward : while_grad extra : diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml index d8d7ca9c52..4645be6b4c 100644 --- a/paddle/phi/api/yaml/ops.yaml +++ b/paddle/phi/api/yaml/ops.yaml @@ -354,6 +354,16 @@ func : fft_r2c backward : fft_r2c_grad +- op : fill_diagonal_tensor + args : (Tensor x, Tensor y, int64_t offset = 0, int dim1 = 0, int dim2 = 1) + output : Tensor(out) + infer_meta : + func : FillDiagonalTensorInferMeta + kernel : + func : fill_diagonal_tensor + inplace : (x -> out) + backward : fill_diagonal_tensor_grad + - op : flip args : (Tensor x, int[] axis) output : Tensor (out) @@ -373,6 +383,43 @@ inplace : (x -> out) backward : floor_grad +- op : fold + args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) + output: Tensor(out) + infer_meta: + func: FoldInferMeta + kernel: + func: fold + backward: fold_grad + +- op : gather_tree + args : (Tensor ids, Tensor parents) + output : Tensor(out) + infer_meta : + func : GatherTreeMeta + kernel : + func : gather_tree + data_type : ids + +- op : gelu + args : (Tensor x, bool approximate = false) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param: [x] + kernel : + func : gelu + backward : gelu_grad + +- op : gumbel_softmax + args : (Tensor x, float temperature = 1.0, bool hard = false, int axis = -1) + output : Tensor + infer_meta : + func : GumbelSoftmaxInferMeta + kernel : + func : gumbel_softmax + backward : gumbel_softmax_grad + - op : hardshrink args : (Tensor x, float threshold = 0.5) output : Tensor (out) @@ -687,3 +734,12 @@ kernel : func : trunc backward : trunc_grad + +- op : unfold + args : (Tensor x, int[] kernel_sizes, int[] strides, 
int[] paddings, int[] dilations) + output : Tensor(out) + infer_meta : + func : UnfoldInferMeta + kernel : + func : unfold + backward : unfold_grad diff --git a/paddle/phi/ops/compat/fill_diagonal_tensor_sig.cc b/paddle/phi/ops/compat/fill_diagonal_tensor_sig.cc deleted file mode 100644 index 56b3c2ab81..0000000000 --- a/paddle/phi/ops/compat/fill_diagonal_tensor_sig.cc +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature FillDiagonalTensorOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature( - "fill_diagonal_tensor", {"X", "Y"}, {"offset", "dim1", "dim2"}, {"Out"}); -} - -KernelSignature FillDiagonalTensorGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("fill_diagonal_tensor_grad", - {"Out@GRAD"}, - {"offset", "dim1", "dim2"}, - {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(fill_diagonal_tensor, - phi::FillDiagonalTensorOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(fill_diagonal_tensor_grad, - phi::FillDiagonalTensorGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/fold_sig.cc b/paddle/phi/ops/compat/fold_sig.cc deleted file mode 100644 index ed8ac084ba..0000000000 --- a/paddle/phi/ops/compat/fold_sig.cc +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature FoldGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "fold_grad", - {"X", "Y@GRAD"}, - {"output_sizes", "kernel_sizes", "strides", "paddings", "dilations"}, - {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(fold_grad, phi::FoldGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/gelu_sig.cc b/paddle/phi/ops/compat/gelu_sig.cc deleted file mode 100644 index 45a0ecea71..0000000000 --- a/paddle/phi/ops/compat/gelu_sig.cc +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature GeluOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("gelu", {"X"}, {"approximate"}, {"Out"}); -} - -KernelSignature GeluGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "gelu_grad", {"X", "Out@GRAD"}, {"approximate"}, {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(gelu_grad, phi::GeluGradOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(gelu, phi::GeluOpArgumentMapping); diff --git a/paddle/phi/ops/compat/gumbel_softmax_sig.cc b/paddle/phi/ops/compat/gumbel_softmax_sig.cc deleted file mode 100644 index 54d3d55bf5..0000000000 --- a/paddle/phi/ops/compat/gumbel_softmax_sig.cc +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature GumbelSoftmaxOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature( - "gumbel_softmax", {"X"}, {"temperature", "hard", "axis"}, {"Out"}); -} - -KernelSignature GumbelSoftmaxGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature( - "gumbel_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(gumbel_softmax, phi::GumbelSoftmaxOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(gumbel_softmax_grad, - phi::GumbelSoftmaxGradOpArgumentMapping); diff --git a/paddle/phi/ops/compat/unfold_sig.cc b/paddle/phi/ops/compat/unfold_sig.cc deleted file mode 100644 index 45415616f2..0000000000 --- a/paddle/phi/ops/compat/unfold_sig.cc +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature UnfoldGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature("unfold_grad", - {"X", "Y@GRAD"}, - {"kernel_sizes", "strides", "paddings", "dilations"}, - {"X@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(unfold_grad, phi::UnfoldGradOpArgumentMapping); -- GitLab
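
As a quick sanity sketch of what this migration preserves (not part of the patch itself, and assuming a Paddle build that already includes this change), the ops whose handwritten C++ registrations are deleted here remain reachable through the unchanged Python API; their YAML entries in ops.yaml now drive the generated static-graph code:

    # Minimal sketch, assuming a Paddle build that includes this change.
    # The YAML-declared gelu/unfold should match the removed C++ registrations.
    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([2, 3, 8, 8])

    # gelu: `approximate = false` is the default declared in ops.yaml.
    y = F.gelu(x, approximate=False)

    # unfold (im2col): kernel_sizes/strides/paddings/dilations mirror the
    # `unfold` args declared in ops.yaml.
    cols = F.unfold(x, kernel_sizes=[3, 3], strides=[1, 1],
                    paddings=[1, 1], dilations=[1, 1])

    print(y.shape)     # [2, 3, 8, 8]
    print(cols.shape)  # [2, 27, 64]: 3*3*3 channels x 8*8 sliding positions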