diff --git a/paddle/fluid/operators/strided_slice_op.cc b/paddle/fluid/operators/strided_slice_op.cc
index 0ff7d654fc29d1e739147a5fc37fe76c9fcf5e71..6f092bbef067ed89b436c8ca5678e1599d063c78 100644
--- a/paddle/fluid/operators/strided_slice_op.cc
+++ b/paddle/fluid/operators/strided_slice_op.cc
@@ -228,7 +228,7 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(StridedSliceOpGradNoNeedBufferVarsInferer,
 namespace ops = paddle::operators;

 DECLARE_INFER_SHAPE_FUNCTOR(strided_slice, StridedSliceInferShape,
-                            PD_INFER_META(phi::StridedSliceInferMeta));
+                            PD_INFER_META(phi::StridedSliceRawInferMeta));
 REGISTER_OPERATOR(strided_slice, ops::StridedSliceOp, ops::StridedSliceOpMaker,
                   ops::StridedSliceOpGradMaker<paddle::framework::OpDesc>,
                   ops::StridedSliceOpGradMaker<paddle::imperative::OpBase>,
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index d763b23ef5c3583310c48a7aeab5eef19ebde468..6bf7a36b0653453f9c3f0cd82ff7a8ccce1139da 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -1922,15 +1922,15 @@ void SqueezeInferMeta(const MetaTensor& x,
   out->set_dtype(x.dtype());
 }

-void StridedSliceInferMeta(const MetaTensor& x,
-                           const std::vector<int>& axes,
-                           const IntArray& starts,
-                           const IntArray& ends,
-                           const IntArray& strides,
-                           const std::vector<int>& infer_flags,
-                           const std::vector<int>& decrease_axis,
-                           MetaTensor* out,
-                           MetaConfig config) {
+void StridedSliceRawInferMeta(const MetaTensor& x,
+                              const std::vector<int>& axes,
+                              const IntArray& starts,
+                              const IntArray& ends,
+                              const IntArray& strides,
+                              const std::vector<int>& infer_flags,
+                              const std::vector<int>& decrease_axis,
+                              MetaTensor* out,
+                              MetaConfig config) {
   auto in_dims = x.dims();
   PADDLE_ENFORCE_LT(
       in_dims.size(),
@@ -2052,6 +2052,19 @@ void StridedSliceInferMeta(const MetaTensor& x,
   out->set_dtype(x.dtype());
 }

+void StridedSliceInferMeta(const MetaTensor& x,
+                           const std::vector<int>& axes,
+                           const IntArray& starts,
+                           const IntArray& ends,
+                           const IntArray& strides,
+                           MetaTensor* out,
+                           MetaConfig config) {
+  std::vector<int> infer_flags(axes.size(), 1);
+  std::vector<int> decrease_axis;
+  StridedSliceRawInferMeta(
+      x, axes, starts, ends, strides, infer_flags, decrease_axis, out, config);
+}
+
 /* Why not use SumRawInferMeta directly?
    Because we need make InferMetaFunction's args follow the design of api.yaml */
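[Reviewer note, not part of the patch] The new StridedSliceInferMeta wrapper above only drops infer_flags and decrease_axis from the signature: it fills infer_flags with ones and passes an empty decrease_axis to StridedSliceRawInferMeta, so no axis is squeezed out of the inferred shape. A minimal Python sketch of the shape behaviour this implies, assuming a build of this branch with eager mode enabled; the tensor and the slice parameters are illustrative only:

# Sketch only: with decrease_axis empty, the inferred output shape should match
# plain NumPy-style strided slicing on the same axes.
import numpy as np
import paddle

x = paddle.to_tensor(np.arange(24).reshape([2, 3, 4]).astype('float32'))
out = paddle.strided_slice(x, axes=[1, 2], starts=[0, 0], ends=[3, 4], strides=[1, 2])
print(out.shape)  # [2, 3, 2]
assert list(out.shape) == list(x.numpy()[:, 0:3:1, 0:4:2].shape)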
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 7ab0f3df2af323ab50b2e0a6f637ca3811131ba5..54f70d8d55405407ad239e185be0421c6718ba0b 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -284,13 +284,21 @@ void SqueezeInferMeta(const MetaTensor& x,
                       MetaTensor* xshape,
                       MetaTensor* out);

+void StridedSliceRawInferMeta(const MetaTensor& x,
+                              const std::vector<int>& axes,
+                              const IntArray& starts,
+                              const IntArray& ends,
+                              const IntArray& strides,
+                              const std::vector<int>& infer_flags,
+                              const std::vector<int>& decrease_axis,
+                              MetaTensor* out,
+                              MetaConfig config = MetaConfig());
+
 void StridedSliceInferMeta(const MetaTensor& x,
                            const std::vector<int>& axes,
                            const IntArray& starts,
                            const IntArray& ends,
                            const IntArray& strides,
-                           const std::vector<int>& infer_flags,
-                           const std::vector<int>& decrease_axis,
                            MetaTensor* out,
                            MetaConfig config = MetaConfig());
diff --git a/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc b/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc
index cdc5534d63c085263450036cfcff073fb271909f..e6c812cf6bd5aa3b4d5119b380986ecc2802e073 100644
--- a/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc
@@ -19,10 +19,10 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(strided_slice_grad,
+PD_REGISTER_KERNEL(strided_slice_raw_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::StridedSliceGradKernel,
+                   phi::StridedSliceRawGradKernel,
                    bool,
                    int,
                    int64_t,
diff --git a/paddle/phi/kernels/cpu/strided_slice_kernel.cc b/paddle/phi/kernels/cpu/strided_slice_kernel.cc
index f34a3301fcb42be52f707a84d51bd167ed4cde18..d0aa7b2f4cee62e2611f7a509053142353d746a9 100644
--- a/paddle/phi/kernels/cpu/strided_slice_kernel.cc
+++ b/paddle/phi/kernels/cpu/strided_slice_kernel.cc
@@ -19,10 +19,10 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/strided_slice_kernel_impl.h"

-PD_REGISTER_KERNEL(strided_slice,
+PD_REGISTER_KERNEL(strided_slice_raw,
                    CPU,
                    ALL_LAYOUT,
-                   phi::StridedSliceKernel,
+                   phi::StridedSliceRawKernel,
                    bool,
                    int,
                    int64_t,
diff --git a/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu b/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu
index 5f31d488533a6e082bea9809b7623243ceea5056..90d9f1d9865773556df338a230097b977974de2e 100644
--- a/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu
@@ -19,10 +19,10 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h"

-PD_REGISTER_KERNEL(strided_slice_grad,
+PD_REGISTER_KERNEL(strided_slice_raw_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::StridedSliceGradKernel,
+                   phi::StridedSliceRawGradKernel,
                    bool,
                    int,
                    int64_t,
diff --git a/paddle/phi/kernels/gpu/strided_slice_kernel.cu b/paddle/phi/kernels/gpu/strided_slice_kernel.cu
index ff10718edb323e482627666e58fadaf50a99e22b..716150ff47dea9b8f8166ac0fbd918ff2dbc8133 100644
--- a/paddle/phi/kernels/gpu/strided_slice_kernel.cu
+++ b/paddle/phi/kernels/gpu/strided_slice_kernel.cu
@@ -19,10 +19,10 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/strided_slice_kernel_impl.h"

-PD_REGISTER_KERNEL(strided_slice,
+PD_REGISTER_KERNEL(strided_slice_raw,
                    GPU,
                    ALL_LAYOUT,
-                   phi::StridedSliceKernel,
+                   phi::StridedSliceRawKernel,
                    bool,
                    int,
                    int64_t,
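[Reviewer note, not part of the patch] The renamed registrations above keep the original dtype list (bool, int, int64_t, float, double and the complex types) on both CPU and GPU; only the registered kernel names change to strided_slice_raw / strided_slice_raw_grad. A small hedged check that integer and bool inputs still dispatch correctly, assuming a CPU build of this branch; the values are illustrative:

# Sketch only: the registration covers bool and integer dtypes, so slicing
# such tensors should reach the renamed strided_slice_raw kernel on CPU.
import paddle

paddle.set_device('cpu')
ints = paddle.arange(0, 10, dtype='int64')
flags = paddle.to_tensor([True, False, True, True, False])
print(paddle.strided_slice(ints, axes=[0], starts=[1], ends=[9], strides=[2]))   # [1, 3, 5, 7]
print(paddle.strided_slice(flags, axes=[0], starts=[0], ends=[5], strides=[2]))  # [True, True, False]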
diff --git a/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h b/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h
index f0fddce6b55472f8afe03579b3bf5b1ca7b93afe..95780682c98dd0fc27df7035bf51c11fa44c90be 100644
--- a/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h
@@ -20,16 +20,16 @@
 namespace phi {

 template <typename T, typename Context>
-void StridedSliceGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
-                            const DenseTensor& out_grad,
-                            const std::vector<int>& axes,
-                            const IntArray& starts,
-                            const IntArray& ends,
-                            const IntArray& strides,
-                            const std::vector<int>& infer_flags,
-                            const std::vector<int>& decrease_axis,
-                            DenseTensor* x_grad) {
+void StridedSliceRawGradKernel(const Context& dev_ctx,
+                               const DenseTensor& x,
+                               const DenseTensor& out_grad,
+                               const std::vector<int>& axes,
+                               const IntArray& starts,
+                               const IntArray& ends,
+                               const IntArray& strides,
+                               const std::vector<int>& infer_flags,
+                               const std::vector<int>& decrease_axis,
+                               DenseTensor* x_grad) {
   int rank = x.dims().size();
 #define SLICE_CASE(Rank) \
   case Rank: \
diff --git a/paddle/phi/kernels/impl/strided_slice_kernel_impl.h b/paddle/phi/kernels/impl/strided_slice_kernel_impl.h
index 2df937524ef20109a22c8ba00b93ebd00d58e7a1..81e6d5056267ac04d3b2a70836c531cff86c6f70 100644
--- a/paddle/phi/kernels/impl/strided_slice_kernel_impl.h
+++ b/paddle/phi/kernels/impl/strided_slice_kernel_impl.h
@@ -20,15 +20,15 @@
 namespace phi {

 template <typename T, typename Context>
-void StridedSliceKernel(const Context& dev_ctx,
-                        const DenseTensor& x,
-                        const std::vector<int>& axes,
-                        const IntArray& starts,
-                        const IntArray& ends,
-                        const IntArray& strides,
-                        const std::vector<int>& infer_flags,
-                        const std::vector<int>& decrease_axis,
-                        DenseTensor* out) {
+void StridedSliceRawKernel(const Context& dev_ctx,
+                           const DenseTensor& x,
+                           const std::vector<int>& axes,
+                           const IntArray& starts,
+                           const IntArray& ends,
+                           const IntArray& strides,
+                           const std::vector<int>& infer_flags,
+                           const std::vector<int>& decrease_axis,
+                           DenseTensor* out) {
   int rank = x.dims().size();
 #define SLICE_CASE(Rank) \
   case Rank: \
+ +#include "paddle/phi/kernels/strided_slice_grad_kernel.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void StridedSliceGradKernel(const Context& dev_ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + DenseTensor* x_grad) { + std::vector infer_flags(axes.size(), 1); + std::vector decrease_axis; + StridedSliceRawGradKernel(dev_ctx, + x, + out_grad, + axes, + starts, + ends, + strides, + infer_flags, + decrease_axis, + x_grad); +} + +} // namespace phi + +PD_REGISTER_KERNEL(strided_slice_grad, + CPU, + ALL_LAYOUT, + phi::StridedSliceGradKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +PD_REGISTER_KERNEL(strided_slice_grad, + GPU, + ALL_LAYOUT, + phi::StridedSliceGradKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#endif diff --git a/paddle/phi/kernels/strided_slice_grad_kernel.h b/paddle/phi/kernels/strided_slice_grad_kernel.h index 07fba9d27bfe90fe4c94bf2c3f03b0e080fc8da0..21d01310b662f4c919d7008f2ddc7f6d5ea836ff 100644 --- a/paddle/phi/kernels/strided_slice_grad_kernel.h +++ b/paddle/phi/kernels/strided_slice_grad_kernel.h @@ -19,6 +19,18 @@ namespace phi { +template +void StridedSliceRawGradKernel(const Context& dev_ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* x_grad); + template void StridedSliceGradKernel(const Context& dev_ctx, const DenseTensor& x, @@ -27,8 +39,6 @@ void StridedSliceGradKernel(const Context& dev_ctx, const IntArray& starts, const IntArray& ends, const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, DenseTensor* x_grad); template diff --git a/paddle/phi/kernels/strided_slice_kernel.cc b/paddle/phi/kernels/strided_slice_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..547d574cd78d040987b05b382319e3d1c68c0a84 --- /dev/null +++ b/paddle/phi/kernels/strided_slice_kernel.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/kernels/strided_slice_kernel.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void StridedSliceKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + DenseTensor* out) { + std::vector infer_flags(axes.size(), 1); + std::vector decrease_axis; + StridedSliceRawKernel( + dev_ctx, x, axes, starts, ends, strides, infer_flags, decrease_axis, out); +} + +} // namespace phi + +PD_REGISTER_KERNEL(strided_slice, + CPU, + ALL_LAYOUT, + phi::StridedSliceKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +PD_REGISTER_KERNEL(strided_slice, + GPU, + ALL_LAYOUT, + phi::StridedSliceKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#endif diff --git a/paddle/phi/kernels/strided_slice_kernel.h b/paddle/phi/kernels/strided_slice_kernel.h index fd90d81b8556c210c05474219a0635b27d1a4223..2c8b373bf03a85a73cb4341756ea1f4e51033e65 100644 --- a/paddle/phi/kernels/strided_slice_kernel.h +++ b/paddle/phi/kernels/strided_slice_kernel.h @@ -19,6 +19,17 @@ namespace phi { +template +void StridedSliceRawKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* out); + template void StridedSliceKernel(const Context& dev_ctx, const DenseTensor& x, @@ -26,8 +37,6 @@ void StridedSliceKernel(const Context& dev_ctx, const IntArray& starts, const IntArray& ends, const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, DenseTensor* out); template diff --git a/paddle/phi/ops/compat/strided_slice_sig.cc b/paddle/phi/ops/compat/strided_slice_sig.cc index 70ce2e3e07ce900289256dea2caec047c017fc7c..9fb70af0dea515f73dd755a2e2619f6981de455a 100644 --- a/paddle/phi/ops/compat/strided_slice_sig.cc +++ b/paddle/phi/ops/compat/strided_slice_sig.cc @@ -57,14 +57,14 @@ KernelSignature StridedSliceOpArgumentMapping( "decrease_axis"}; paddle::SmallVector outputs = {"Out"}; - std::string op_type; + std::string kernel_name; if (ctx.IsDenseTensorVectorInput("Input")) { - op_type = "strided_slice_array"; + kernel_name = "strided_slice_array"; } else { - op_type = "strided_slice"; + kernel_name = "strided_slice_raw"; } // NOTE(dev): Use this to avoid regularization. - KernelSignature sig(op_type, inputs, attrs, outputs); + KernelSignature sig(kernel_name, inputs, attrs, outputs); return sig; } @@ -106,15 +106,15 @@ KernelSignature StridedSliceGradOpArgumentMapping( "decrease_axis"}; paddle::SmallVector outputs = {GradVarName("Input")}; - std::string op_type; + std::string kernel_name; if (ctx.IsDenseTensorVectorInput("Input")) { - op_type = "strided_slice_array_grad"; + kernel_name = "strided_slice_array_grad"; } else { - op_type = "strided_slice_grad"; + kernel_name = "strided_slice_raw_grad"; } // NOTE(dev): Use this to avoid regularization. 
- KernelSignature sig(op_type, inputs, attrs, outputs); + KernelSignature sig(kernel_name, inputs, attrs, outputs); return sig; } @@ -132,573 +132,273 @@ NOTE: The following codes are for 'get_compat_kernel_signature.py' ############################ Forward ############################ -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", 
"ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", 
"decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return 
KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); - -############################ Backward ############################ - - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", 
-"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"starts","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", 
-"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"starts","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - 
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags",
-"decrease_axis"},
-                       {GradVarName("Input")});
-
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "EndsTensorList",
-"StartsTensorList","infer_flags", "decrease_axis"},
-                       {GradVarName("Input")});
-
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "EndsTensorList", "starts","infer_flags",
-"decrease_axis"},
-                       {GradVarName("Input")});
-
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "ends", "StartsTensor","infer_flags",
-"decrease_axis"},
-                       {GradVarName("Input")});
-
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "ends", "StartsTensorList","infer_flags",
-"decrease_axis"},
-                       {GradVarName("Input")});
-
-return KernelSignature("{strided_slice_array_grad}", {"Input",
-GradVarName("Out")},
-                       {"axes", "starts", "ends", "starts","infer_flags",
-"decrease_axis"},
-                       {GradVarName("Input")});
 */
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index cb3781d5c299b8325967b65658f77af944cb003a..0be014394f851b4e9127f603dc6c931a9161e2e8 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11426,6 +11426,10 @@ def strided_slice(input, axes, starts, ends, strides):
             sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
             # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2].
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_strided_slice(input, axes, starts, ends,
+                                                strides)
+
     helper = LayerHelper('strided_slice', **locals())

     check_variable_and_dtype(input, 'input',
@@ -11590,7 +11594,11 @@ def shape(input):
             res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
             print(res) # [array([  3, 100, 100], dtype=int32)]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
+        out = _C_ops.final_state_shape(input)
+        out.stop_gradient = True
+        return out
+    if _in_legacy_dygraph():
         out = _C_ops.shape(input)
         out.stop_gradient = True
         return out
diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py
index bada62e3239eadfb75da47eb85e73a3ac67e8e41..3d961a7413ca00a2ac7a174e3729a9ed79db9dcf 100644
--- a/python/paddle/fluid/tests/unittests/test_shape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_shape_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 from paddle.fluid import core
 from paddle.fluid.op import Operator

@@ -24,6 +25,7 @@ from paddle.fluid.op import Operator
 class TestShapeOp(OpTest):
     def setUp(self):
         self.op_type = "shape"
+        self.python_api = paddle.shape
         self.config()
         self.shape = [2, 3]
         input = np.zeros(self.shape)
@@ -34,7 +36,7 @@
         self.shape = [2, 3]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class case1(TestShapeOp):
diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
index e9be6b338fb86390bfa006d7fb4b6d5f34894d4d..ae17cb9b1b57caad1e5c20aa984d2309335c6842 100644
--- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
@@ -58,6 +58,7 @@ class TestStrideSliceOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'strided_slice'
+        self.python_api = paddle.strided_slice
         self.output = strided_slice_native_forward(
             self.input, self.axes, self.starts, self.ends, self.strides)

@@ -72,10 +73,10 @@
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(set(['Input']), 'Out')
+        self.check_grad(set(['Input']), 'Out', check_eager=True)

     def initTestCase(self):
         self.input = np.random.rand(100)
@@ -704,7 +705,7 @@ class TestStridedSliceTensorArray(unittest.TestCase):
         l2.sum().backward()
         grads_static = net.get_all_grads()
         net.clear_all_grad()
-        # compare result of dygraph and static 
+        # compare result of dygraph and static
         self.is_grads_equal(grads_static, grads_dy)
         self.assertTrue(
             np.array_equal(s1, s2),
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 5499c81c7ecd9754b91af5b0bfd0512bb0d39af5..c89e519f80f7aaef949d240010570d19e38a12da 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -951,6 +951,14 @@
     func : selu
   backward : selu_grad

+- api : shape
+  args : (Tensor input)
+  output : Tensor
+  infer_meta :
+    func : ShapeInferMeta
+  kernel :
+    func : shape, shape_sr
+
 # shard_index
 - api : shard_index
   args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
@@ -1070,6 +1078,15 @@
     func : square
   backward : square_grad

+- api : strided_slice
+  args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
+  output : Tensor
+  infer_meta :
+    func : StridedSliceInferMeta
+  kernel :
+    func : strided_slice
+  backward : strided_slice_grad
+
 - api : subtract
   args : (Tensor x, Tensor y)
   output : Tensor
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 5efe6e7451782e5a81b0f4c83877b9564619e467..3830d7f92689bec9c49ed2c504a0fe279fe53760 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -660,6 +660,16 @@
   kernel :
     func : square_grad

+- backward_api : strided_slice_grad
+  forward : strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param : [x]
+  kernel :
+    func : strided_slice_grad
+
 - backward_api : subtract_grad
   forward : subtract (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
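[Reviewer note, not part of the patch] With the api.yaml and backward.yaml entries above, shape and strided_slice are generated as final-state APIs, and the in_dygraph_mode() branches added in nn.py route paddle.shape and paddle.strided_slice to them. A brief hedged usage sketch, assuming eager mode is enabled by default in the build under test:

# Sketch only: paddle.shape returns an int32 shape tensor with
# stop_gradient=True, matching the dygraph branch added in nn.py.
import paddle

x = paddle.rand([3, 100, 100], dtype='float32')
s = paddle.shape(x)
print(s)                # Tensor holding [3, 100, 100], dtype int32
print(s.stop_gradient)  # True

out = paddle.strided_slice(x, axes=[0, 1], starts=[0, 10], ends=[3, 100], strides=[1, 10])
print(out.shape)        # [3, 9, 100]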