// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <vector>

#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/framework.pb.h"
#include "paddle/fluid/lite/utils/all.h"

/*
 * This file contains all the argument parameter data structures for the
 * operators.
 */

namespace paddle {
namespace lite {
namespace operators {

using param_t = Any;

/// ----------------------- Functional operators ------------------------------
struct FeedParam {
  const std::vector<lite::Tensor>* feed_list{};
  lite::Tensor* out{};
  int col;
};

struct FetchParam {
  const lite::Tensor* input{};
  std::vector<lite::Tensor>* fetch_list{};
  int col;
};

// Helper op for the lite framework.
struct IoCopyParam {
  const lite::Tensor* x{};
  lite::Tensor* y{};
};

/// -------------------------- NN operators ------------------------------------

struct FcParam {
  lite::Tensor* input{};
  lite::Tensor* w{};
  lite::Tensor* bias{};
  lite::Tensor* output{};
  lite::DDim in_mat_dims;
  int in_num_col_dims{1};
};

struct ReluParam {
  lite::Tensor* input{};
  lite::Tensor* output{};
};

// For Mul Op
struct MulParam {
  lite::Tensor* x{};
  lite::Tensor* y{};
  lite::Tensor* output{};
  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

struct MulGradParam {
  const lite::Tensor* x{};
  const lite::Tensor* y{};
  const lite::Tensor* output_grad{};
  lite::Tensor* x_grad{};
  lite::Tensor* y_grad{};
  int x_num_col_dims{1};
  int y_num_col_dims{1};
};

// For Scale Op
struct ScaleParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  float scale{1.};
  float bias{};
  bool bias_after_scale{true};
};

// For Softmax Op
struct SoftmaxParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  int axis{-1};
};

// For Reshape and Reshape2 Op
struct ReshapeParam {
  const lite::Tensor* x{};
  const lite::Tensor* actual_shape{nullptr};
  lite::Tensor* output{};
  lite::Tensor* xshape{};
  std::vector<int> shape{};
  bool inplace{false};
};

// For Concat Op
struct ConcatParam {
  std::vector<lite::Tensor*> x{};
  lite::Tensor* output{};
  int axis{0};
};

// For Convolution Op
struct ConvParam {
  lite::Tensor* x{};
  lite::Tensor* filter{};
  const lite::Tensor* bias{};
  const lite::Tensor* residualData{};
  lite::Tensor* output{};
  std::vector<int> strides{1, 1};
  std::vector<int> paddings{0, 0};
  int groups{1};
  std::vector<int> dilations{1, 1};
  bool fuse_relu_before_depthwise_conv{false};
  bool use_mkldnn{false};
  bool fuse_relu{false};  // only used in mkldnn kernel
  bool use_quantizer{
      false};  // set true for op that should be quantized, only used for cpu
  bool fuse_residual_connection{false};
  float scale_in{1.0f};           // only used with mkl-dnn int8
  float scale_out{1.0f};          // only used with mkl-dnn int8
  float scale_in_eltwise{1.0f};   // only used with mkl-dnn int8
  float scale_weights{1.0f};      // only used with mkl-dnn int8
  bool force_fp32_output{false};  // only used in mkl-dnn int8
  std::string data_format{"AnyLayout"};
};

// For Pooling Op
struct PoolParam {
  lite::Tensor* x{};
  lite::Tensor* output{};
  std::string pooling_type{""};
  std::vector<int> ksize{};
  bool global_pooling{
      false};  // if true, kernel size and paddings will be ignored
  std::vector<int> strides{1, 1};
  std::vector<int> paddings{0, 0};
  bool exclusive{true};
  bool adaptive{false};
  bool ceil_mode{false};
  bool use_quantizer{false};
  std::string data_format{"AnyLayout"};
};

// For Dropout Op
struct DropoutParam {
  const lite::Tensor* x{};
  lite::Tensor* output{};
  lite::Tensor* mask{};
  float dropout_prob{.5f};
  bool is_test{false};
  bool fix_seed{false};
  int seed{0};
  std::string dropout_implementation{"downgrade_in_infer"};
};

/// ----------------------- element wise operators ----------------------
struct ElementwiseParam {
  const lite::Tensor* X{};
  const lite::Tensor* Y{};
  lite::Tensor* Out{};
  int axis{-1};  // for broadcasting.
};

struct ElementwiseGradParam {
  const lite::Tensor* Y{};
  const lite::Tensor* Out_grad{};
  lite::Tensor* X_grad{};
  lite::Tensor* Y_grad{};
  int axis{-1};  // for broadcasting.
};

/// ----------------------- activation operators ----------------------
struct ActivationParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct ActivationGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out{};
  // for backward
  lite::Tensor* X_grad{};
  const lite::Tensor* Out_grad{};
};

/// ----------------------- mean operators ----------------------
struct MeanParam {
  const lite::Tensor* X{};
  lite::Tensor* Out{};
};

struct MeanGradParam {
  const lite::Tensor* X{};
  const lite::Tensor* Out_grad{};
  // for backward
  lite::Tensor* X_grad{};
};

/// ----------------------- fill_constant operators ----------------------
struct FillConstantParam {
  int dtype{framework::proto::VarType::FP32};
  std::vector<int64_t> shape{};
  float value{0.0f};
  // useless for x86, keep it for compatibility
  bool force_cpu{false};
  lite::Tensor* Out{};
};

/// ----------------------- sgd operators ----------------------
struct SGDParam {
  int dtype{framework::proto::VarType::FP32};

  const lite::Tensor* Param{};
  const lite::Tensor* LearningRate{};
  const lite::Tensor* Grad{};
  lite::Tensor* ParamOut{};
};

/// ----------------------- batch_norm operators ----------------------
struct BatchNormParam {
  lite::Tensor* x{};
  lite::Tensor* bias{};
  lite::Tensor* mean{};
  lite::Tensor* scale{};
  lite::Tensor* var{};
  lite::Tensor* out{};
  lite::Tensor* mean_out{};
  lite::Tensor* var_out{};
  lite::Tensor* saved_mean{};
  lite::Tensor* saved_var{};
  float eps{1e-5};
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle
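
// A minimal usage sketch (illustrative only, not compiled as part of this
// header). It assumes a kernel retrieves its typed parameter struct through
// the kernel base class's Param<T>() accessor, which is backed by the
// Any-based param_t defined above; the FcCompute kernel name below is only a
// hypothetical example.
//
//   void FcCompute::Run() {
//     auto& param = this->Param<operators::FcParam>();
//     // Read param.input / param.w / param.bias and write param.output.
//   }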