// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

///
/// \file paddle_mkldnn_quantizer_config.h
///
/// \brief Mkldnn quantizer config.
///
/// \author paddle-infer@baidu.com
/// \date 2020-01-01
/// \since 1.7.0
///

#pragma once

#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle_api.h"  // NOLINT

namespace paddle {

///
/// \brief Algorithms for finding scale of quantized Tensors.
///
enum class ScaleAlgo {
  NONE,      ///< Do not compute scale
  MAX,       ///< Find scale based on the max absolute value
  MAX_CH,    ///< Find scale based on the max absolute value per output channel
  MAX_CH_T,  ///< Find scale based on the max absolute value per output channel
             ///< of a transposed tensor
  KL,        ///< Find scale based on KL Divergence
};
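
// Illustrative note, not Paddle's implementation: the MAX-style algorithms
// derive the quantization scale of a tensor from its maximum absolute value,
// either over the whole tensor (MAX) or per output channel (MAX_CH, MAX_CH_T),
// while KL picks the scale whose quantized distribution stays closest (in KL
// divergence) to the original one. A rough sketch of the MAX idea, for a
// hypothetical float buffer `data` of length `n`:
//
//   float max_abs = 0.f;
//   for (size_t i = 0; i < n; ++i) max_abs = std::max(max_abs, std::abs(data[i]));
//   // the int8 scale is then chosen so that max_abs maps onto the int8 range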

///
/// \class MkldnnQuantizerConfig
///
/// \brief Config for MKL-DNN quantization.
///
/// MkldnnQuantizerConfig configures the MKL-DNN quantization parameters,
/// including the scale algorithm, warm-up data, warm-up batch size,
/// the list of quantized op types, etc.
///
/// It is not recommended to construct this config directly; obtain it via
/// AnalysisConfig::mkldnn_quantizer_config() instead.
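///
/// A minimal usage sketch, assuming the MKL-DNN quantizer entry points of
/// AnalysisConfig; the model path is a placeholder:
///
/// \code
/// AnalysisConfig cfg;
/// cfg.SetModel("path/to/model");
/// cfg.EnableMKLDNN();
/// cfg.EnableMkldnnQuantizer();
/// MkldnnQuantizerConfig* q_cfg = cfg.mkldnn_quantizer_config();
/// q_cfg->SetWarmupBatchSize(1);
/// \endcode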
///
struct MkldnnQuantizerConfig {
  ///
  /// \brief Construct a new Mkldnn Quantizer Config object
  ///
  MkldnnQuantizerConfig();

  ///
  /// \brief Set the scale algo
  ///
  /// Specify a quantization algorithm for a connection (input/output) of the
  /// operator type.
  /// \param[in] op_type_name the operator's type name.
  /// \param[in] conn_name name of the connection (input/output) of the
  /// operator.
  /// \param[in] algo the algorithm for computing scale.
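  ///
  /// For example, with a config pointer q_cfg obtained as in the class-level
  /// sketch above (the operator and connection names are illustrative and
  /// depend on the model):
  ///
  /// \code
  /// q_cfg->SetScaleAlgo("conv2d", "Input", ScaleAlgo::KL);
  /// q_cfg->SetScaleAlgo("conv2d", "Filter", ScaleAlgo::MAX_CH);
  /// \endcode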
  ///
  void SetScaleAlgo(std::string op_type_name, std::string conn_name,
                    ScaleAlgo algo) {
    rules_[op_type_name][conn_name] = algo;
  }

  ///
  /// \brief Get the scale algo
  ///
  /// Get the quantization algorithm for a connection (input/output) of the
  /// operator type.
  ///
  /// \param[in] op_type_name the operator's type name.
  /// \param[in] conn_name name of the connection (input/output) of the
  /// operator.
  /// \return the scale algorithm for the given connection.
  ///
  ScaleAlgo scale_algo(const std::string& op_type_name,
                       const std::string& conn_name) const;

  ///
  /// \brief Set the warmup data
  ///
  /// Set the batch of data to be used for warm-up iteration.
  ///
  /// \param[in] data batch of data.
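  ///
  /// A sketch of preparing a single warm-up sample; the input name, shape and
  /// contents are placeholders for real representative data:
  ///
  /// \code
  /// auto warmup = std::make_shared<std::vector<PaddleTensor>>(1);
  /// PaddleTensor& input = warmup->front();
  /// input.name = "image";
  /// input.shape = {1, 3, 224, 224};
  /// input.dtype = PaddleDType::FLOAT32;
  /// input.data.Resize(1 * 3 * 224 * 224 * sizeof(float));
  /// // ... fill input.data.data() with representative values ...
  /// q_cfg->SetWarmupData(warmup);
  /// \endcode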
  ///
  void SetWarmupData(std::shared_ptr<std::vector<PaddleTensor>> data) {
    warmup_data_ = data;
  }

  ///
  /// \brief Get the warmup data
  ///
  /// Get the batch of data used for warm-up iteration.
  ///
  /// \return the warm-up data
  ///
  std::shared_ptr<std::vector<PaddleTensor>> warmup_data() const {
    return warmup_data_;
  }

  ///
  /// \brief Set the warmup batch size
  ///
  /// Set the batch size for warm-up iteration.
  ///
  /// \param[in] batch_size warm-up batch size
  ///
  void SetWarmupBatchSize(int batch_size) { warmup_bs_ = batch_size; }

  ///
  /// \brief Get the warmup batch size
  ///
  /// Get the batch size for warm-up iteration.
  ///
  /// \return the warm-up batch size
  ///
  int warmup_batch_size() const { return warmup_bs_; }

  ///
  /// \brief Set quantized op list
  ///
  /// Set the list of op types to be quantized in the quantization process.
  ///
  /// \param[in] op_list List of quantized ops
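  ///
  /// For example, to restrict quantization to a few op types (the names below
  /// are only illustrative candidates):
  ///
  /// \code
  /// q_cfg->SetEnabledOpTypes({"conv2d", "pool2d"});
  /// \endcode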
  ///
  void SetEnabledOpTypes(std::unordered_set<std::string> op_list) {
    enabled_op_types_ = op_list;
  }

  ///
  /// \brief Get quantized op list
  ///
  /// \return list of quantized ops
  ///
  const std::unordered_set<std::string>& enabled_op_types() const {
    return enabled_op_types_;
  }

  ///
  /// \brief Set the excluded op ids
  ///
  /// \param[in] op_ids_list excluded op ids
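  ///
  /// For example, to keep two specific operators un-quantized (the ids are
  /// hypothetical):
  ///
  /// \code
  /// q_cfg->SetExcludedOpIds({3, 7});
  /// \endcode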
  ///
  void SetExcludedOpIds(std::unordered_set<int> op_ids_list) {
    excluded_op_ids_ = op_ids_list;
  }

  ///
  /// \brief Get the excluded op ids
  ///
  /// \return the excluded op ids
  ///
  const std::unordered_set<int>& excluded_op_ids() const {
    return excluded_op_ids_;
  }

  ///
  /// \brief Set default scale algorithm
  ///
  /// \param[in] algo the default method for calculating scale in the
  /// quantization process
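  ///
  /// For example, to fall back to KL-based scales wherever no per-connection
  /// rule has been set:
  ///
  /// \code
  /// q_cfg->SetDefaultScaleAlgo(ScaleAlgo::KL);
  /// \endcode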
  ///
  void SetDefaultScaleAlgo(ScaleAlgo algo) { default_scale_algo_ = algo; }

  ///
  /// \brief Get default scale algorithm
  ///
  /// \return the default method for calculating scale in the quantization
  /// process
  ///
  ScaleAlgo default_scale_algo() const { return default_scale_algo_; }

 protected:
  std::map<std::string, std::map<std::string, ScaleAlgo>> rules_;
  std::unordered_set<std::string> enabled_op_types_;
  std::unordered_set<int> excluded_op_ids_;
  std::shared_ptr<std::vector<PaddleTensor>> warmup_data_;
  int warmup_bs_{1};
  ScaleAlgo default_scale_algo_{ScaleAlgo::MAX};
};

}  // namespace paddle