// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

///
/// \file paddle_mkldnn_quantizer_config.h
///
/// \brief Mkldnn quantizer config.
///
/// \author paddle-infer@baidu.com
/// \date 2020-01-01
/// \since 1.7.0
///

#pragma once

#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle_api.h"            // NOLINT
#include "paddle_infer_declare.h"  // NOLINT

namespace paddle {

///
/// \brief Algorithms for finding scale of quantized Tensors.
///
enum class ScaleAlgo {
  NONE,      ///< Do not compute scale
  MAX,       ///< Find scale based on the max absolute value
  MAX_CH,    ///< Find scale based on the max absolute value per output channel
  MAX_CH_T,  ///< Find scale based on the max absolute value per output channel
             ///< of a transposed tensor
  MAX_CH_GRU,  ///< Find scale based on the max absolute value per output
               ///< channel for fusion_gru/multi_gru operators
  KL,          ///< Find scale based on KL Divergence
};

///
/// \class MkldnnQuantizerConfig
///
/// \brief Config for mkldnn quantize.
///
/// The MkldnnQuantizerConfig is used to configure Mkldnn's quantization
/// parameters, including scale algorithm, warmup data, warmup batch size,
/// quantized op list, etc.
///
/// It is not recommended to use this config directly, please refer to
/// AnalysisConfig::mkldnn_quantizer_config()
///
65
struct PD_INFER_DECL MkldnnQuantizerConfig {
66 67 68
  ///
  /// \brief Construct a new Mkldnn Quantizer Config object
  ///
69 70
  MkldnnQuantizerConfig();

71 72 73 74 75 76 77 78 79 80
  ///
  /// \brief Set the scale algo
  ///
  /// Specify a quantization algorithm for a connection (input/output) of the
  /// operator type.
  /// \param[in] op_type_name the operator's name.
  /// \param[in] conn_name name of the connection (input/output) of the
  /// operator.
  /// \param[in] algo the algorithm for computing scale.
  ///
81 82 83 84 85
  void SetScaleAlgo(std::string op_type_name, std::string conn_name,
                    ScaleAlgo algo) {
    rules_[op_type_name][conn_name] = algo;
  }

86 87 88 89 90 91 92 93 94 95 96
  ///
  /// \brief Get the scale algo
  ///
  /// Get the quantization algorithm for a connection (input/output) of the
  /// operator type.
  ///
  /// \param[in] op_type_name the operator's name.
  /// \param[in] conn_name name of the connection (input/output) of the
  /// operator.
  /// \return the scale algo.
  ///
97 98 99
  ScaleAlgo scale_algo(const std::string& op_type_name,
                       const std::string& conn_name) const;

100 101 102 103 104 105 106
  ///
  /// \brief Set the warmup data
  ///
  /// Set the batch of data to be used for warm-up iteration.
  ///
  /// \param[in] data batch of data.
  ///
107 108 109 110
  void SetWarmupData(std::shared_ptr<std::vector<PaddleTensor>> data) {
    warmup_data_ = data;
  }

111 112 113 114 115 116 117
  ///
  /// \brief Get the warmup data
  ///
  /// Get the batch of data used for warm-up iteration.
  ///
  /// \return the warm up data
  ///
118 119 120 121
  std::shared_ptr<std::vector<PaddleTensor>> warmup_data() const {
    return warmup_data_;
  }

122 123 124 125 126 127 128
  ///
  /// \brief Set the warmup batch size
  ///
  /// Set the batch size for warm-up iteration.
  ///
  /// \param[in] batch_size warm-up batch size
  ///
129 130
  void SetWarmupBatchSize(int batch_size) { warmup_bs_ = batch_size; }

131 132 133 134 135 136
  ///
  /// \brief Get the warmup batch size
  ///
  /// Get the batch size for warm-up iteration.
  ///
  /// \return the warm up batch size
137 138
  int warmup_batch_size() const { return warmup_bs_; }

139 140 141 142 143 144 145
  ///
  /// \brief Set quantized op list
  ///
  /// In the quantization process, set the op list that supports quantization
  ///
  /// \param[in] op_list List of quantized ops
  ///
146 147 148 149
  void SetEnabledOpTypes(std::unordered_set<std::string> op_list) {
    enabled_op_types_ = op_list;
  }

150 151 152 153 154
  ///
  /// \brief Get quantized op list
  ///
  /// \return list of quantized ops
  ///
155 156 157 158
  const std::unordered_set<std::string>& enabled_op_types() const {
    return enabled_op_types_;
  }

159 160 161 162 163
  ///
  /// \brief Set the excluded op ids
  ///
  /// \param[in] op_ids_list excluded op ids
  ///
164 165 166 167
  void SetExcludedOpIds(std::unordered_set<int> op_ids_list) {
    excluded_op_ids_ = op_ids_list;
  }

168 169 170 171 172
  ///
  /// \brief Get the excluded op ids
  ///
  /// \return exclude op ids
  ///
173 174 175 176
  const std::unordered_set<int>& excluded_op_ids() const {
    return excluded_op_ids_;
  }

177 178 179 180 181
  ///
  /// \brief Set default scale algorithm
  ///
  /// \param[in] algo Method for calculating scale in quantization process
  ///
182 183
  void SetDefaultScaleAlgo(ScaleAlgo algo) { default_scale_algo_ = algo; }

184 185 186 187 188 189
  ///
  /// \brief Get default scale algorithm
  ///
  /// \return Method for calculating scale in quantization
  /// process
  ///
190 191 192 193 194 195 196 197 198 199 200 201
  ScaleAlgo default_scale_algo() const { return default_scale_algo_; }

 protected:
  std::map<std::string, std::map<std::string, ScaleAlgo>> rules_;
  std::unordered_set<std::string> enabled_op_types_;
  std::unordered_set<int> excluded_op_ids_;
  std::shared_ptr<std::vector<PaddleTensor>> warmup_data_;
  int warmup_bs_{1};
  ScaleAlgo default_scale_algo_{ScaleAlgo::MAX};
};

}  // namespace paddle