/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <cstdlib>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/slice_op.h"

namespace paddle {
namespace operators {

// Computes the output extent of each sliced axis. Negative starts/ends are
// shifted by the axis extent, the range is clipped to [0, axis_size], and the
// extent is ceil(|end - start| / |stride|).
static void StridedSliceOutDims(
    const std::vector<int>& starts, const std::vector<int>& ends,
    const std::vector<int>& strides, const std::vector<int>& axes,
    const std::vector<int>& infer_flags, const framework::DDim in_dims,
    int* out_dims_vector, const size_t size, bool infer_shape) {
  for (int i = 0; i < in_dims.size(); i++) {
    out_dims_vector[i] = in_dims[i];
  }
  int stride_index, start_index, end_index;
  for (size_t i = 0; i < size; i++) {
    int axes_index = axes[i];
    if (infer_shape && infer_flags[i] == -1) {
      out_dims_vector[axes_index] = -1;
      continue;
    }
    PADDLE_ENFORCE_NE(strides[i], 0, "stride must not be zero");
    start_index = starts[i];
    end_index = ends[i];
    stride_index = strides[i];
    int axis_size = in_dims[axes_index];
    if (axis_size < 0) {
      continue;
    }

    if (start_index < 0) {
      start_index = start_index + axis_size;
    }
    if (end_index < 0) {
      end_index = end_index + axis_size;
    }

    if (stride_index < 0) {
      start_index = start_index + 1;
      end_index = end_index + 1;
    }

    bool zero_dim_condition =
        ((stride_index < 0 && (start_index <= end_index)) ||
         (stride_index > 0 && (start_index >= end_index)));
    PADDLE_ENFORCE_EQ(zero_dim_condition, false,
                      "starts and ends must satisfy the requirement of the "
                      "given stride direction");

    int left = std::max(0, std::min(start_index, end_index));
    int right = std::min(axis_size, std::max(start_index, end_index));
    int step = std::abs(stride_index);

    auto out_dims_index = (std::abs(right - left) + step - 1) / step;
    out_dims_vector[axes_index] = out_dims_index;
  }
}

// Normalizes starts/ends/strides in place so that every stride is positive.
// Axes sliced with a negative stride are marked in reverse_axis and handled
// with an explicit reverse in the kernels below.
static void StridedSliceFunctor(int* starts, int* ends, int* strides, int* axes,
                                int* reverse_axis, const framework::DDim dims,
                                const size_t size) {
  for (size_t axis = 0; axis < size; axis++) {
    int axis_size = dims[axes[axis]];
    int axis_index = axis;
    if (axis_size < 0) {
      starts[axis_index] = 0;
      ends[axis_index] = 1;
      strides[axis_index] = 1;
    }
    // stride must not be zero
    if (starts[axis_index] < 0) {
      starts[axis_index] = starts[axis_index] + axis_size;
    }
    if (ends[axis_index] < 0) {
      ends[axis_index] = ends[axis_index] + axis_size;
    }
    if (strides[axis_index] < 0) {
      reverse_axis[axis_index] = 1;
      strides[axis_index] = -strides[axis_index];
      if (starts[axis_index] > ends[axis_index]) {
        // shift both ends by one before swapping, so the reversed range keeps
        // the same elements
        starts[axis_index] = starts[axis_index] + 1;
        ends[axis_index] = ends[axis_index] + 1;
      }
      std::swap(starts[axis_index], ends[axis_index]);
    } else {
      reverse_axis[axis_index] = 0;
    }
  }
}

template <typename DeviceContext, typename T>
class StridedSliceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    int rank = ctx.Input<framework::Tensor>("Input")->dims().size();
    switch (rank) {
      case 1:
        StridedSliceCompute<1>(ctx);
        break;
      case 2:
        StridedSliceCompute<2>(ctx);
        break;
      case 3:
        StridedSliceCompute<3>(ctx);
        break;
      case 4:
        StridedSliceCompute<4>(ctx);
        break;
      case 5:
        StridedSliceCompute<5>(ctx);
        break;
      case 6:
        StridedSliceCompute<6>(ctx);
        break;
    }
  }

 private:
  template <size_t D>
  void StridedSliceCompute(const framework::ExecutionContext& context) const {
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto in = context.Input<framework::Tensor>("Input");
    auto out = context.Output<framework::Tensor>("Out");
    auto in_dims = in->dims();

    auto starts = context.Attr<std::vector<int>>("starts");
    auto ends = context.Attr<std::vector<int>>("ends");
    auto strides = context.Attr<std::vector<int>>("strides");
    auto axes = context.Attr<std::vector<int>>("axes");
    auto infer_flags = context.Attr<std::vector<int>>("infer_flags");

    auto starts_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto ends_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto strides_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto reverse_axis = Eigen::array<bool, D>();

    auto list_new_ends_tensor =
        context.MultiInput<framework::Tensor>("EndsTensorList");
    auto list_new_starts_tensor =
        context.MultiInput<framework::Tensor>("StartsTensorList");
    auto list_new_strides_tensor =
        context.MultiInput<framework::Tensor>("StridesTensorList");

    // Tensor inputs, when provided, override the corresponding attributes.
    if (list_new_starts_tensor.size() > 0) {
      starts = get_new_data_from_tensorlist(list_new_starts_tensor);
    } else if (context.HasInput("StartsTensor")) {
      auto* starts_tensor = context.Input<framework::Tensor>("StartsTensor");
      starts = get_new_data_from_tensor(starts_tensor);
    }

    if (list_new_ends_tensor.size() > 0) {
      ends = get_new_data_from_tensorlist(list_new_ends_tensor);
    } else if (context.HasInput("EndsTensor")) {
      auto* ends_tensor = context.Input<framework::Tensor>("EndsTensor");
      ends = get_new_data_from_tensor(ends_tensor);
    }

    if (list_new_strides_tensor.size() > 0) {
      strides = get_new_data_from_tensorlist(list_new_strides_tensor);
    } else if (context.HasInput("StridesTensor")) {
      auto* strides_tensor = context.Input<framework::Tensor>("StridesTensor");
      strides = get_new_data_from_tensor(strides_tensor);
    }

    std::vector<int> out_dims_vector(in_dims.size(), -1);
    StridedSliceOutDims(starts, ends, strides, axes, infer_flags, in_dims,
                        out_dims_vector.data(), axes.size(), false);
    framework::DDim out_dims(framework::make_ddim(out_dims_vector));

    std::vector<int> reverse_vector(starts.size(), 0);
    StridedSliceFunctor(starts.data(), ends.data(), strides.data(), axes.data(),
                        reverse_vector.data(), in_dims, starts.size());

    for (size_t axis = 0; axis < D; axis++) {
      starts_indices[axis] = 0;
      ends_indices[axis] = out_dims[axis];
      strides_indices[axis] = 1;
      reverse_axis[axis] = false;
    }
    for (size_t axis = 0; axis < axes.size(); axis++) {
      int axis_index = axes[axis];
      starts_indices[axis_index] = starts[axis];
      ends_indices[axis_index] = ends[axis];
      strides_indices[axis_index] = strides[axis];
      reverse_axis[axis_index] = (reverse_vector[axis] == 1) ? true : false;
    }

    framework::Tensor tmp;
    tmp.mutable_data<T>(out_dims, context.GetPlace());

    out->Resize(out_dims);
    out->mutable_data<T>(context.GetPlace());
    auto in_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *in);
    auto tmp_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            tmp);
    auto out_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *out, out_dims);
    // Take the forward slice first, then flip the axes that were requested
    // with a negative stride.
    tmp_t.device(place) =
        in_t.stridedSlice(starts_indices, ends_indices, strides_indices);
    out_t.device(place) = tmp_t.reverse(reverse_axis);
  }
};

template <typename DeviceContext, typename T>
class StridedSliceGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    size_t rank = ctx.Input<framework::Tensor>("Input")->dims().size();
    switch (rank) {
      case 1:
        StridedSliceGradCompute<1>(ctx);
        break;
      case 2:
        StridedSliceGradCompute<2>(ctx);
        break;
      case 3:
        StridedSliceGradCompute<3>(ctx);
        break;
      case 4:
        StridedSliceGradCompute<4>(ctx);
        break;
      case 5:
        StridedSliceGradCompute<5>(ctx);
        break;
      case 6:
        StridedSliceGradCompute<6>(ctx);
        break;
    }
  }

 private:
  template <size_t D>
  void StridedSliceGradCompute(
      const framework::ExecutionContext& context) const {
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto* d_input =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* d_out =
        context.Output<framework::Tensor>(framework::GradVarName("Input"));
    d_out->mutable_data<T>(context.GetPlace());

    auto& dev_ctx = context.template device_context<DeviceContext>();
    math::SetConstant<DeviceContext, T> set_zero;
    set_zero(dev_ctx, d_out, static_cast<T>(0));
    auto out_dims = d_out->dims();
    auto in_dims = d_input->dims();

    auto starts = context.Attr<std::vector<int>>("starts");
    auto ends = context.Attr<std::vector<int>>("ends");
    auto strides = context.Attr<std::vector<int>>("strides");
    auto axes = context.Attr<std::vector<int>>("axes");

    auto list_new_ends_tensor =
        context.MultiInput<framework::Tensor>("EndsTensorList");
    auto list_new_starts_tensor =
        context.MultiInput<framework::Tensor>("StartsTensorList");
    auto list_new_strides_tensor =
        context.MultiInput<framework::Tensor>("StridesTensorList");

    if (list_new_starts_tensor.size() > 0) {
      starts = get_new_data_from_tensorlist(list_new_starts_tensor);
    } else if (context.HasInput("StartsTensor")) {
      auto* starts_tensor = context.Input<framework::Tensor>("StartsTensor");
      starts = get_new_data_from_tensor(starts_tensor);
    }

    if (list_new_ends_tensor.size() > 0) {
      ends = get_new_data_from_tensorlist(list_new_ends_tensor);
    } else if (context.HasInput("EndsTensor")) {
      auto* ends_tensor = context.Input<framework::Tensor>("EndsTensor");
      ends = get_new_data_from_tensor(ends_tensor);
    }

    if (list_new_strides_tensor.size() > 0) {
      strides = get_new_data_from_tensorlist(list_new_strides_tensor);
    } else if (context.HasInput("StridesTensor")) {
      auto* strides_tensor = context.Input<framework::Tensor>("StridesTensor");
      strides = get_new_data_from_tensor(strides_tensor);
    }

    auto starts_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto ends_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto strides_indices = Eigen::DSizes<Eigen::DenseIndex, D>();
    auto reverse_axis = Eigen::array<bool, D>();

    std::vector<int> reverse_vector(starts.size(), 0);
    StridedSliceFunctor(starts.data(), ends.data(), strides.data(), axes.data(),
                        reverse_vector.data(), out_dims, starts.size());

    for (size_t axis = 0; axis < D; axis++) {
      starts_indices[axis] = 0;
      ends_indices[axis] = out_dims[axis];
      strides_indices[axis] = 1;
      // also reset the reverse flags so axes that are not sliced are never
      // flipped (mirrors the forward kernel and avoids reading an
      // uninitialized flag)
      reverse_axis[axis] = false;
    }
    for (size_t axis = 0; axis < axes.size(); axis++) {
      int axis_index = axes[axis];
      starts_indices[axis_index] = starts[axis];
      ends_indices[axis_index] = ends[axis];
      strides_indices[axis_index] = strides[axis];
      reverse_axis[axis_index] = (reverse_vector[axis] == 1) ? true : false;
    }

    framework::Tensor reverse_input;
    reverse_input.mutable_data<T>(in_dims, context.GetPlace());
    auto in_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *d_input);
    auto reverse_in_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            reverse_input);
    auto out_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *d_out, out_dims);
    // Undo the reverse first, then scatter the gradient back through the same
    // strided view of the zero-initialized input gradient.
    reverse_in_t.device(place) = in_t.reverse(reverse_axis);
    out_t.stridedSlice(starts_indices, ends_indices, strides_indices)
        .device(place) = reverse_in_t;
  }
};
}  // namespace operators
}  // namespace paddle
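
// Illustrative sketch (not part of the operator, and the attribute values are
// hypothetical): how the two helpers above normalize a negative-stride slice.
// Assume a 1-D input of extent 5 with axes = {0}, starts = {-1}, ends = {-6},
// strides = {-1}, i.e. the NumPy slice x[-1:-6:-1], a full reversal:
//
//   int out_dim = 0;
//   StridedSliceOutDims({-1}, {-6}, {-1}, {0}, {1},
//                       framework::make_ddim({5}), &out_dim, 1, false);
//   // out_dim == 5: start/end are shifted by the extent (-1 -> 4, -6 -> -1),
//   // bumped by one for the negative stride (5, 0), clipped to [0, 5), and
//   // divided by |stride| = 1.
//
//   int starts[] = {-1}, ends[] = {-6}, strides[] = {-1}, axes[] = {0};
//   int reverse[] = {0};
//   StridedSliceFunctor(starts, ends, strides, axes, reverse,
//                       framework::make_ddim({5}), 1);
//   // starts[0] == 0, ends[0] == 5, strides[0] == 1, reverse[0] == 1: the
//   // kernels then take the forward slice [0:5:1] with Eigen's stridedSlice
//   // and flip axis 0 with reverse().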