/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { // Internal template using EigenTensor = framework::EigenTensor; using framework::Tensor; inline std::vector get_new_data( const std::vector& list_new_tensor) { // get tensor from std::vector vec_new_data; for (size_t i = 0; i < list_new_tensor.size(); ++i) { auto tensor = list_new_tensor[i]; PADDLE_ENFORCE_EQ( tensor->dims(), framework::make_ddim({1}), "The tensor's shape in list of Op(crop_tensor) should be [1]."); if (platform::is_gpu_place(tensor->place())) { framework::Tensor temp; TensorCopySync(*tensor, platform::CPUPlace(), &temp); vec_new_data.push_back(static_cast(*temp.data())); } else { vec_new_data.push_back(static_cast(*tensor->data())); } } return vec_new_data; } static framework::DDim ValidateShape(const std::vector shape, const std::vector offsets, const framework::DDim& in_dims) { auto in_dim_size = in_dims.size(); auto shape_size = shape.size(); PADDLE_ENFORCE_EQ( in_dim_size, shape_size, "Attr(shape)'s size of Op(crop_tensor) should be equal " "to that of input Tensor. " "Please check the Attr(shape)'s size of Op(fluid.layers.crop_tensor)."); std::vector output_shape(shape.size(), 0); for (size_t i = 0; i < shape.size(); ++i) { if (shape[i] <= 0 && in_dims[i] > 0) { PADDLE_ENFORCE_NE( shape[i], 0, "The element in Attr(shape) of Op(crop_tensor) should not be zero."); PADDLE_ENFORCE_EQ(shape[i], -1, "When the element in Attr(shape) of Op(crop_tensor) is " "negative, only -1 is supported."); output_shape[i] = in_dims[i] - offsets[i]; } else { output_shape[i] = static_cast(shape[i]); } } return framework::make_ddim(output_shape); } static std::vector GetShape(const framework::ExecutionContext& ctx) { std::vector res; int rank = ctx.Input("X")->dims().size(); auto list_new_shape_tensor = ctx.MultiInput("ShapeTensor"); if (list_new_shape_tensor.size() > 0) { // have offsets tensor list PADDLE_ENFORCE_EQ(list_new_shape_tensor.size(), rank, "Input(ShapeTensor)'s length of Op(crop_tensor) should " "be equal to dimension size of input tensor."); res = get_new_data(list_new_shape_tensor); return res; } auto* shape_tensor = ctx.HasInput("Shape") ? ctx.Input("Shape") : nullptr; if (shape_tensor) { auto* shape_data = shape_tensor->data(); framework::Tensor cpu_shape_tensor; if (platform::is_gpu_place(shape_tensor->place())) { TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); shape_data = cpu_shape_tensor.data(); } res = std::vector(shape_data, shape_data + shape_tensor->numel()); } return res; } static std::vector GetOffsets(const framework::ExecutionContext& ctx) { std::vector res; int rank = ctx.Input("X")->dims().size(); auto list_new_offsets_tensor = ctx.MultiInput("OffsetsTensor"); if (list_new_offsets_tensor.size() > 0) { // have offsets tensor list res = get_new_data(list_new_offsets_tensor); return res; } if (ctx.HasInput("Offsets")) { PADDLE_ENFORCE_EQ( ctx.Attr>("offsets").empty(), true, "Input 'Offsets' and attribute 'offsets' should not be used " "at the same time."); const auto* offsets_tensor = ctx.Input("Offsets"); PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1); PADDLE_ENFORCE_EQ( rank, offsets_tensor->dims()[0], "Offsets size should be equal to dimension size of input tensor."); const int* offsets_data; framework::Tensor cpu_tmp_tensor; if (platform::is_cpu_place(offsets_tensor->place())) { offsets_data = offsets_tensor->data(); } else { framework::TensorCopySync(*offsets_tensor, platform::CPUPlace(), &cpu_tmp_tensor); offsets_data = cpu_tmp_tensor.data(); } res = std::vector(offsets_data, offsets_data + rank); } else { res = ctx.Attr>("offsets"); PADDLE_ENFORCE_EQ( rank, static_cast(res.size()), "Offsets size should be equal to dimension size of input tensor."); } return res; } template void CropTensorFunction(const framework::ExecutionContext& context) { auto* x = context.Input("X"); auto* out = context.Output("Out"); auto x_dims = x->dims(); auto out_dims = out->dims(); // get shape from Input(ShapeTensor) of Input(Shape) std::vector shape = GetShape(context); // out_dims setted by arrt(shape) if (shape.size() == 0) { for (size_t i = 0; i < out_dims.size(); ++i) { shape.push_back(out_dims[i]); } } auto offsets = GetOffsets(context); out_dims = ValidateShape(shape, offsets, x->dims()); out->mutable_data(out_dims, context.GetPlace()); for (size_t i = 0; i < offsets.size(); ++i) { PADDLE_ENFORCE_LE( offsets[i] + shape[i], x_dims[i], "The sum of the Attr(offsets) and Attr(shape) of Op(crop_tensor) " "should be less than or equal to corresponding input dimension size."); } auto x_tensor = EigenTensor::From(*x); auto out_tensor = EigenTensor::From(*out); Eigen::array e_offsets; Eigen::array e_shape; for (size_t i = 0; i < D; ++i) { e_offsets[i] = offsets[i]; e_shape[i] = out->dims()[i]; } auto& place = *context.template device_context().eigen_device(); out_tensor.device(place) = x_tensor.slice(e_offsets, e_shape); } template class CropTensorKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { int rank = context.Input("X")->dims().size(); switch (rank) { case 1: CropTensorFunction(context); break; case 2: CropTensorFunction(context); break; case 3: CropTensorFunction(context); break; case 4: CropTensorFunction(context); break; case 5: CropTensorFunction(context); break; case 6: CropTensorFunction(context); break; default: PADDLE_THROW( "CropTensorOp only support tensors with no more than 6 " "dimensions."); } } }; template void CropTensorGradFunction(const framework::ExecutionContext& context) { auto* d_x = context.Output(framework::GradVarName("X")); auto* x = context.Input("X"); if (d_x != nullptr) { auto* d_out = context.Input(framework::GradVarName("Out")); d_x->mutable_data(x->dims(), context.GetPlace()); auto offsets = GetOffsets(context); Eigen::array, D> paddings; for (size_t i = 0; i < D; ++i) { paddings[i].first = offsets[i]; paddings[i].second = d_x->dims()[i] - d_out->dims()[i] - offsets[i]; } auto d_x_tensor = EigenTensor::From(*d_x); auto d_out_tensor = EigenTensor::From(*d_out); d_x_tensor.device( *context.template device_context().eigen_device()) = d_out_tensor.pad(paddings, 0); } } template class CropTensorGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { size_t rank = context.Input(framework::GradVarName("Out"))->dims().size(); switch (rank) { case 1: CropTensorGradFunction(context); break; case 2: CropTensorGradFunction(context); break; case 3: CropTensorGradFunction(context); break; case 4: CropTensorGradFunction(context); break; case 5: CropTensorGradFunction(context); break; case 6: CropTensorGradFunction(context); break; default: PADDLE_THROW( "CropTensorOp only support tensors with no more than 6 " "dimensions."); } } }; } // namespace operators } // namespace paddle