// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/scope.h"
#include "paddle/fluid/lite/operators/op_params.h"
#include "paddle/fluid/lite/utils/all.h"

namespace paddle {
namespace lite {
namespace operators {

// Computes the output size of one spatial dimension of a convolution from the
// input size, (dilated) filter size, padding and stride.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  CHECK_OR_FALSE(output_size > 0);

  return output_size;
}

// Returns true when the convolution needs an im2col-style expansion, i.e.
// any spatial filter dimension != 1, stride != 1, padding != 0, or
// dilation != 1.
inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

class ConvOpLite : public OpLite {
 public:
  ConvOpLite() {}

  explicit ConvOpLite(const std::string& type) : OpLite(type) {}

  bool CheckShape() const override;
  bool InferShape() const override;

  void AttachKernel(KernelBase* kernel) override { kernel->SetParam(param_); }

  // TODO(Superjomn) replace framework::OpDesc with a lite one.
  bool AttachImpl(const cpp::OpDesc& op_desc, lite::Scope* scope) override {
    auto X = op_desc.Input("Input").front();
    auto Filter = op_desc.Input("Filter").front();
    auto Out = op_desc.Output("Output").front();

    param_.x = scope->FindVar(X)->GetMutable<lite::Tensor>();
    param_.filter = scope->FindVar(Filter)->GetMutable<lite::Tensor>();
    param_.output = scope->FindVar(Out)->GetMutable<lite::Tensor>();

    std::vector<std::string> input_arg_names = op_desc.InputArgumentNames();

    // "Bias" and "ResidualData" are optional inputs; attach them only when
    // they are declared in the op description and bound in the scope.
    if (std::find(input_arg_names.begin(), input_arg_names.end(), "Bias") !=
        input_arg_names.end()) {
      auto bias_arguments = op_desc.Input("Bias");
      if (bias_arguments.size() != 0) {
        auto bias_var = scope->FindVar(bias_arguments.front());
        if (bias_var != nullptr) {
          param_.bias = &bias_var->Get<lite::Tensor>();
        }
      }
    }
    if (std::find(input_arg_names.begin(), input_arg_names.end(),
                  "ResidualData") != input_arg_names.end()) {
      auto res_argument = op_desc.Input("ResidualData");
      if (res_argument.size() != 0) {
        auto residual_data_var = scope->FindVar(res_argument.front());
        if (residual_data_var != nullptr) {
          param_.residualData = &residual_data_var->Get<lite::Tensor>();
        }
      }
    }

    param_.strides = op_desc.GetAttr<std::vector<int>>("strides");
    param_.paddings = op_desc.GetAttr<std::vector<int>>("paddings");
    param_.groups = op_desc.GetAttr<int>("groups");
    param_.dilations = op_desc.GetAttr<std::vector<int>>("dilations");

    return true;
  }

  std::string DebugString() const override { return "conv2d"; }

 private:
  mutable ConvParam param_;
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle
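
// A minimal usage sketch (not part of the original file) showing how
// ConvOutputSize resolves one spatial dimension of a conv2d output shape.
// The concrete numbers are illustrative only:
//
//   // 224x224 input, 3x3 filter, padding 1, stride 2, dilation 1:
//   //   dkernel     = 1 * (3 - 1) + 1           = 3
//   //   output_size = (224 + 2 * 1 - 3) / 2 + 1 = 112
//   int out_h = paddle::lite::operators::ConvOutputSize(
//       /*input_size=*/224, /*filter_size=*/3, /*dilation=*/1,
//       /*padding=*/1, /*stride=*/2);  // == 112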