diff --git a/paddle/fluid/operators/generator/filters.py b/paddle/fluid/operators/generator/filters.py
index f5609c868b4c746f0e46556034166d791d1c7b6f..23f6540a41269df0556d2c1bfaf87a935fca197d 100644
--- a/paddle/fluid/operators/generator/filters.py
+++ b/paddle/fluid/operators/generator/filters.py
@@ -33,7 +33,7 @@ from type_mapping import (
 def get_infer_var_type_func(op_name):
     if op_name == "assign":
         return f"""
-    class {to_pascal_case(op_name)}InferVarType : public framework::VarTypeInference {{
+class {to_pascal_case(op_name)}InferVarType : public framework::VarTypeInference {{
  public:
   void operator()(framework::InferVarTypeContext *ctx) const override {{
     ctx->SyncTypeAndDataType("X", "Out");
@@ -64,16 +64,37 @@ class {to_pascal_case(op_name)}InferVarType : public framework::VarTypeInference
 """
     elif op_name == "merge_selected_rows":
         return f"""
-    class {to_pascal_case(op_name)}InferVarType
-        : public framework::PassInDtypeAndVarTypeToOutput {{
-     protected:
-      std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
-          const override {{
-        static std::unordered_map<std::string, std::string> m{{{{"X", /*->*/ "Out"}}}};
-        return m;
-      }}
-    }};
-    """
+class {to_pascal_case(op_name)}InferVarType : public framework::PassInDtypeAndVarTypeToOutput {{
+ protected:
+  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType() const override {{
+    static std::unordered_map<std::string, std::string> m{{{{"X", /*->*/ "Out"}}}};
+    return m;
+  }}
+}};
+"""
+    elif op_name == "strided_slice":
+        return f"""
+class {to_pascal_case(op_name)}InferVarType : public framework::VarTypeInference {{
+ public:
+  void operator()(framework::InferVarTypeContext *ctx) const override {{
+    ctx->SetOutputType("Out", ctx->GetInputType("Input"));
+    ctx->SetOutputDataType("Out", ctx->GetInputDataType("Input"));
+  }}
+}};
+"""
+    elif op_name == "strided_slice_grad":
+        return f"""
+class {to_pascal_case(op_name)}InferVarType : public framework::VarTypeInference {{
+ public:
+  void operator()(framework::InferVarTypeContext *ctx) const override {{
+    ctx->SetOutputType(framework::GradVarName("Input"),
+                       ctx->GetInputType(framework::GradVarName("Out")));
+    ctx->SetOutputDataType(
+        framework::GradVarName("Input"),
+        ctx->GetInputDataType(framework::GradVarName("Out")));
+  }}
+}};
+"""
     else:
         return None
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.cc b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
index 0520af7c505b725ee9608de5e0799da59f5d31f0..c9aeb7356d2e64b0d2160dc857e08d7cd91e3f06 100644
--- a/paddle/fluid/operators/generator/get_expected_kernel_func.cc
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.cc
@@ -178,6 +178,47 @@ phi::KernelKey GetSoftmaxGradExpectedKernelType(
       ctx.GetPlace(), layout_, phi::TransToPhiDataType(input_data_type));
 }
 
+phi::KernelKey GetStridedSliceExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  auto* in_var = ctx.InputVar("Input");
+  auto is_in_var_array = in_var->IsType<framework::LoDTensorArray>();
+  if (is_in_var_array) {
+    auto& tensor_array = in_var->Get<framework::LoDTensorArray>();
+    for (auto& tensor : tensor_array) {
+      if (!platform::is_cuda_pinned_place(tensor.place())) {
+        PADDLE_ENFORCE_EQ(
+            platform::is_same_place(tensor.place(),
+                                    ctx.device_context().GetPlace()),
+            true,
+            platform::errors::InvalidArgument(
+                "Place of context is %s. Place of input tensor is %s. They "
+                "should be the same, but received different places.",
+                string::to_string(ctx.device_context().GetPlace()),
+                string::to_string(tensor.place())));
+      }
+    }
+    return phi::KernelKey(op_ptr->IndicateVarDataType(ctx, "Input"),
+                          ctx.GetPlace());
+  }
+  // NOTE: a cuda pinned tensor needs to copy its data to the target place
+  auto in_tensor = ctx.Input<phi::DenseTensor>("Input");
+  if (platform::is_cuda_pinned_place(in_tensor->place())) {
+    return phi::KernelKey(framework::TransToProtoVarType(in_tensor->dtype()),
+                          ctx.GetPlace());
+  }
+  return phi::KernelKey(op_ptr->IndicateVarDataType(ctx, "Input"),
+                        in_tensor->place());
+}
+
+phi::KernelKey GetStridedSliceGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr) {
+  return phi::KernelKey(
+      op_ptr->IndicateVarDataType(ctx, framework::GradVarName("Out")),
+      ctx.GetPlace());
+}
+
 phi::KernelKey GetUpdateLossScalingExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr) {
diff --git a/paddle/fluid/operators/generator/get_expected_kernel_func.h b/paddle/fluid/operators/generator/get_expected_kernel_func.h
index bf7c691fffa46154bec9cea00e8819ee15b93d7f..10f198b580fcc49462460064222c36a2176ddd3e 100644
--- a/paddle/fluid/operators/generator/get_expected_kernel_func.h
+++ b/paddle/fluid/operators/generator/get_expected_kernel_func.h
@@ -44,6 +44,14 @@ phi::KernelKey GetSoftmaxGradExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr);
 
+phi::KernelKey GetStridedSliceExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
+phi::KernelKey GetStridedSliceGradExpectedKernelType(
+    const framework::ExecutionContext& ctx,
+    const framework::OperatorWithKernel* op_ptr);
+
 phi::KernelKey GetUpdateLossScalingExpectedKernelType(
     const framework::ExecutionContext& ctx,
     const framework::OperatorWithKernel* op_ptr);
diff --git a/paddle/fluid/operators/strided_slice_op.cc b/paddle/fluid/operators/strided_slice_op.cc
deleted file mode 100644
index fffd99ae76b34042ff1587d114907dcc6440cfe9..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/strided_slice_op.cc
+++ /dev/null
@@ -1,255 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/kernels/funcs/strided_slice.h"
-
-namespace paddle {
-namespace operators {
-
-class StridedSliceOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    auto *in_var = ctx.InputVar("Input");
-    auto is_in_var_array = in_var->IsType<framework::LoDTensorArray>();
-    if (is_in_var_array) {
-      auto &tensor_array = in_var->Get<framework::LoDTensorArray>();
-      for (auto &tensor : tensor_array) {
-        if (!platform::is_cuda_pinned_place(tensor.place())) {
-          PADDLE_ENFORCE_EQ(
-              platform::is_same_place(tensor.place(),
-                                      ctx.device_context().GetPlace()),
-              true,
-              platform::errors::InvalidArgument(
-                  "Place of context is %s. Place of input tensor is %s. They "
-                  "are should be same, but reveived different place.",
-                  string::to_string(ctx.device_context().GetPlace()),
-                  string::to_string(tensor.place())));
-        }
-      }
-      return phi::KernelKey(
-          OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
-          ctx.GetPlace());
-    }
-    // NOTE: cuda pinned tensor need to copy its data to target place
-    auto in_tensor = ctx.Input<phi::DenseTensor>("Input");
-    if (platform::is_cuda_pinned_place(in_tensor->place())) {
-      return phi::KernelKey(framework::TransToProtoVarType(in_tensor->dtype()),
-                            ctx.GetPlace());
-    }
-    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
-                          in_tensor->place());
-  }
-  phi::KernelKey GetKernelTypeForVar(
-      const std::string &var_name,
-      const phi::DenseTensor &tensor,
-      const phi::KernelKey &expected_kernel_type) const override {
-    if (var_name == "StartsTensor" || var_name == "EndsTensor" ||
-        var_name == "StridesTensor") {
-      return phi::KernelKey(phi::Backend::ALL_BACKEND,
-                            expected_kernel_type.layout(),
-                            expected_kernel_type.dtype());
-    }
-    if (var_name == "StartsTensorList" || var_name == "EndsTensorList" ||
-        var_name == "StridesTensorList") {
-      return phi::KernelKey(phi::Backend::ALL_BACKEND,
-                            expected_kernel_type.layout(),
-                            expected_kernel_type.dtype());
-    }
-    return phi::KernelKey(
-        tensor.place(), tensor.layout(), expected_kernel_type.dtype());
-  }
-};
-
-class StridedSliceOpVarTypeInference : public framework::VarTypeInference {
- public:
-  void operator()(framework::InferVarTypeContext *ctx) const override {
-    ctx->SetOutputType("Out", ctx->GetInputType("Input"));
-    ctx->SetOutputDataType("Out", ctx->GetInputDataType("Input"));
-  }
-};
-
-class StridedSliceOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("Input", "Tensor of data to extract slices from.");
-    AddOutput("Out", "Strided Sliced data tensor.");
-
-    AddInput("StartsTensor",
-             "(Tensor<int32>, optional) If provided, slice will use this."
-             "It has the highest priority of StartsTensor, StartsTensorList "
-             "and attr(starts).")
-        .AsDispensable();
-    AddInput("EndsTensor",
-             "(Tensor<int32>, optional) If provided, slice will use this."
-             "It has the highest priority of EndsTensor, EndsTensorList and "
-             "attr(ends).")
-        .AsDispensable();
-    AddInput(
-        "StridesTensor",
-        "(Tensor<int32>, optional) If provided, slice will use this."
- "It has the highest priority of StridesTensor, StridesTensorList and " - "attr(ends).") - .AsDispensable(); - AddInput( - "StartsTensorList", - "(vector>, optional) If provided, slice will use this." - "The shape of the tensor in vector MUST BE [1]." - "It has higher priority compare with attr(starts).") - .AsDuplicable() - .AsDispensable(); - AddInput( - "EndsTensorList", - "(vector>, optional) If provided, slice will use this." - "The shape of the tensor in vector MUST BE [1]." - "It has higher priority compare with attr(ends).") - .AsDuplicable() - .AsDispensable(); - AddInput( - "StridesTensorList", - "(vector>, optional) If provided, slice will use this." - "The shape of the tensor in vector MUST BE [1]." - "It has higher priority compare with attr(strides).") - .AsDuplicable() - .AsDispensable(); - AddAttr>( - "axes", "(list) Axes that `starts` and `ends` apply to."); - AddAttr>( - "starts", "(list) Start indices for the strided slice start.") - .SetDefault({}); - AddAttr>("ends", - "(list) End indices the tensor slice end") - .SetDefault({}); - AddAttr>( - "strides", "(list Stride step from the start to the end)") - .SetDefault({}); - AddAttr>( - "infer_flags", "(list) Flags of inferring dims in attributes.") - .SetDefault({}); - AddAttr>("decrease_axis", "(list) decrease_axis") - .SetDefault({}); - AddComment(R"DOC( -Strided Slice Operator. -Instead of calling this op directly most users will want to use the -NumPy-style slicing syntax. -For Example: -data = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='int64') -y = fluid.layers.strided_slice(data, [0, 1], [1,0], [2, 3], [1, 1]) -)DOC"); - } -}; - -class StridedSliceOpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext &ctx) const override { - return phi::KernelKey(OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Out")), - ctx.GetPlace()); - } - phi::KernelKey GetKernelTypeForVar( - const std::string &var_name, - const phi::DenseTensor &tensor, - const phi::KernelKey &expected_kernel_type) const override { - if (var_name == "StartsTensor" || var_name == "EndsTensor" || - var_name == "StridesTensor") { - return phi::KernelKey(phi::Backend::ALL_BACKEND, - expected_kernel_type.layout(), - expected_kernel_type.dtype()); - } - if (var_name == "StartsTensorList" || var_name == "EndsTensorList" || - var_name == "StridesTensorList") { - return phi::KernelKey(phi::Backend::ALL_BACKEND, - expected_kernel_type.layout(), - expected_kernel_type.dtype()); - } - return phi::KernelKey( - tensor.place(), tensor.layout(), expected_kernel_type.dtype()); - } -}; - -template -class StridedSliceOpGradMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr bind) const override { - bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - bind->SetInput("Input", this->Input("Input")); - bind->SetInput("StartsTensor", this->Input("StartsTensor")); - bind->SetInput("EndsTensor", this->Input("EndsTensor")); - bind->SetInput("StridesTensor", this->Input("StridesTensor")); - bind->SetInput("StartsTensorList", this->Input("StartsTensorList")); - bind->SetInput("EndsTensorList", this->Input("EndsTensorList")); - bind->SetInput("StridesTensorList", this->Input("StridesTensorList")); - bind->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input")); - 
-    bind->SetAttrMap(this->Attrs());
-    bind->SetType("strided_slice_grad");
-  }
-};
-
-class StridedSliceGradOpVarTypeInference : public framework::VarTypeInference {
- public:
-  void operator()(framework::InferVarTypeContext *ctx) const override {
-    ctx->SetOutputType(framework::GradVarName("Input"),
-                       ctx->GetInputType(framework::GradVarName("Out")));
-    ctx->SetOutputDataType(
-        framework::GradVarName("Input"),
-        ctx->GetInputDataType(framework::GradVarName("Out")));
-  }
-};
-
-DECLARE_NO_NEED_BUFFER_VARS_INFERER(StridedSliceOpGradNoNeedBufferVarsInferer,
-                                    "Input");
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-
-DECLARE_INFER_SHAPE_FUNCTOR(strided_slice,
-                            StridedSliceInferShape,
-                            PD_INFER_META(phi::StridedSliceRawInferMeta));
-
-REGISTER_OPERATOR(strided_slice,
-                  ops::StridedSliceOp,
-                  ops::StridedSliceOpMaker,
-                  ops::StridedSliceOpGradMaker<paddle::framework::OpDesc>,
-                  ops::StridedSliceOpGradMaker<paddle::imperative::OpBase>,
-                  ops::StridedSliceOpVarTypeInference,
-                  StridedSliceInferShape);
-
-DECLARE_INFER_SHAPE_FUNCTOR(strided_slice_grad,
-                            StridedSliceGradInferShape,
-                            PD_INFER_META(phi::GeneralUnaryGradInferMeta));
-
-REGISTER_OPERATOR(strided_slice_grad,
-                  ops::StridedSliceOpGrad,
-                  ops::StridedSliceOpGradNoNeedBufferVarsInferer,
-                  ops::StridedSliceGradOpVarTypeInference,
-                  StridedSliceGradInferShape);
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 1cfeac6e0fc6664a11c791880f50f7d0af76b3c6..66a1c3e91e49339379c30cfaf31fd18fe6e2ea5c 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -2267,6 +2267,30 @@
   outputs :
     out : Out
 
+- op : strided_slice
+  backward : strided_slice_grad
+  inputs :
+    x : Input
+  outputs :
+    out : Out
+  int_array :
+    starts :
+      data_type : int
+      tensor_name : StartsTensor
+      tensors_name : StartsTensorList
+    ends :
+      data_type : int
+      tensor_name : EndsTensor
+      tensors_name : EndsTensorList
+    strides :
+      data_type : int
+      tensor_name : StridesTensor
+      tensors_name : StridesTensorList
+  manual_signature : [strided_slice, strided_slice_grad]
+  get_expected_kernel_type :
+    strided_slice : GetStridedSliceExpectedKernelType
+    strided_slice_grad : GetStridedSliceGradExpectedKernelType
+
 - op : subtract (elementwise_sub)
   backward : subtract_grad (elementwise_sub_grad)
   inputs :
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index a512342311cb8e3ecf2e75f2763233438830c903..3107ea73571cdc7b6c4f88f68d456c43e9020183 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -76,6 +76,19 @@
     func : softmax_grad
   composite : softmax_grad(out, out_grad, axis, x_grad)
 
+- backward_op : strided_slice_grad
+  forward : strided_slice (Tensor x, int[] axes, IntArray starts={}, IntArray ends={}, IntArray strides={}, int[] infer_flags={}, int[] decrease_axis={}) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides, int[] infer_flags, int[] decrease_axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param : [x]
+  kernel :
+    func : strided_slice_grad
+    param : [x, axes, starts, ends, strides]
+    data_type : out_grad
+  no_need_buffer : x
+
 - backward_op : tril_triu_grad
   forward : tril_triu (Tensor x, int diagonal = 0, bool lower = false) -> Tensor(out)
   args : (Tensor out_grad, int diagonal, bool lower)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index bcdf38e85fea8961e515fad022bd3f2f4f796c48..a88b0089366ab0cde910e40896ec8aeaea1c5ebc 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -355,6 +355,16 @@
   inplace : (x -> out)
   backward : softmax_grad
 
+- op : strided_slice
+  args : (Tensor x, int[] axes, IntArray starts={}, IntArray ends={}, IntArray strides={}, int[] infer_flags={}, int[] decrease_axis={})
+  output : Tensor
+  infer_meta :
+    func : StridedSliceRawInferMeta
+  kernel :
+    func : strided_slice
+    param : [x, axes, starts, ends, strides]
+  backward : strided_slice_grad
+
 - op : tril_indices
   args : (int rows = 0, int cols = 0, int offset = 0, DataType dtype = DataType::INT64)
   output : Tensor(out)
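
Note on the filters.py change: for op_name = "strided_slice", to_pascal_case(op_name) evaluates to "StridedSlice" (and "strided_slice_grad" to "StridedSliceGrad"), so the two new branches emit the C++ below into the generated op file. This is a sketch of the expanded output obtained purely by substituting the f-string placeholders shown in the diff; the generated file is assumed to supply the surrounding includes and namespaces, and the explanatory comments are ours, not generator output.

class StridedSliceInferVarType : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    // Forward op: Out mirrors both the variable type and the dtype of Input,
    // so a LoDTensorArray input yields a LoDTensorArray output.
    ctx->SetOutputType("Out", ctx->GetInputType("Input"));
    ctx->SetOutputDataType("Out", ctx->GetInputDataType("Input"));
  }
};

class StridedSliceGradInferVarType : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    // Grad op: Input@GRAD mirrors Out@GRAD in the same way.
    ctx->SetOutputType(framework::GradVarName("Input"),
                       ctx->GetInputType(framework::GradVarName("Out")));
    ctx->SetOutputDataType(
        framework::GradVarName("Input"),
        ctx->GetInputDataType(framework::GradVarName("Out")));
  }
};

Apart from the class names (the deleted file called these StridedSliceOpVarTypeInference and StridedSliceGradOpVarTypeInference), the bodies match the hand-written classes removed from strided_slice_op.cc, so var-type inference behavior should be unchanged by the migration.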