# Copyright (c) 2019  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core

from ..common import get_logger
_logger = get_logger(__name__, level=logging.INFO)

WEIGHT_QUANTIZATION_TYPES = [
    'abs_max', 'channel_wise_abs_max', 'range_abs_max',
    'moving_average_abs_max'
]
WEIGHT_QUANTIZATION_TYPES_TENSORRT = ['channel_wise_abs_max']

ACTIVATION_QUANTIZATION_TYPES = [
    'abs_max', 'range_abs_max', 'moving_average_abs_max'
]

ACTIVATION_QUANTIZATION_TYPES_TENSORRT = [
    'range_abs_max', 'moving_average_abs_max'
]

VALID_DTYPES = ['int8']
TRANSFORM_PASS_OP_TYPES = QuantizationTransformPass._supported_quantizable_op_type
QUANT_DEQUANT_PASS_OP_TYPES = AddQuantDequantPass._supported_quantizable_op_type + \
        AddQuantDequantPass._activation_type
TENSORRT_OP_TYPES = [
    'mul', 'conv2d', 'pool2d', 'depthwise_conv2d', 'elementwise_add',
    'leaky_relu'
]

_quant_config_default = {
    # weight quantize type, default is 'channel_wise_abs_max'
    'weight_quantize_type': 'channel_wise_abs_max',
    # activation quantize type, default is 'moving_average_abs_max'
    'activation_quantize_type': 'moving_average_abs_max',
    # weight quantize bit num, default is 8
    'weight_bits': 8,
    # activation quantize bit num, default is 8
    'activation_bits': 8,
    # ops of name_scope in not_quant_pattern list, will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops of type in quantize_op_types, will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
    # data type after quantization. Currently only 'int8' is supported; default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
    'window_size': 10000,
    # The decay coefficient of moving average, default is 0.9
    'moving_rate': 0.9,
    # if True, 'quantize_op_types' will be TENSORRT_OP_TYPES
    'for_tensorrt': False,
    # if True, 'quantize_op_types' will be TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    'is_full_quantize': False
}


def _parse_configs(user_config):
    """
    Check whether the user's configs are valid and merge them with the default config.
    Args:
        user_config(dict): user's config.
    Return:
        configs(dict): the final configs that will be used.
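
    Examples:
        A minimal sketch of a user config; keys that are not given fall back
        to the defaults in _quant_config_default (the values below are only
        illustrative):

            user_config = {
                'weight_quantize_type': 'abs_max',
                'activation_quantize_type': 'moving_average_abs_max',
                'not_quant_pattern': ['skip_quant'],
            }
            configs = _parse_configs(user_config)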
    """

    configs = copy.deepcopy(_quant_config_default)
    configs.update(user_config)

    assert isinstance(configs['for_tensorrt'], bool) and isinstance(
        configs['is_full_quantize'],
        bool), "'for_tensorrt' and 'is_full_quantize' must both be bool'"

    # check whether the configs are valid
    if configs['for_tensorrt']:
        weight_types = WEIGHT_QUANTIZATION_TYPES_TENSORRT
        activation_types = ACTIVATION_QUANTIZATION_TYPES_TENSORRT
        platform = 'TensorRT'
    else:
        weight_types = WEIGHT_QUANTIZATION_TYPES
        activation_types = ACTIVATION_QUANTIZATION_TYPES
        platform = 'PaddleLite'
    assert configs['weight_quantize_type'] in weight_types, \
        "Unknown weight_quantize_type: {}. {} only supports {} ".format(configs['weight_quantize_type'],
                platform, weight_types)

    assert configs['activation_quantize_type'] in activation_types, \
        "Unknown activation_quantize_type: {}. {} only supports {}".format(configs['activation_quantize_type'],
                platform, activation_types)

    assert isinstance(configs['weight_bits'], int), \
        "weight_bits must be int value."

    assert (configs['weight_bits'] >= 1 and configs['weight_bits'] <= 16), \
        "weight_bits should be between 1 and 16."

    assert isinstance(configs['activation_bits'], int), \
        "activation_bits must be int value."

    assert (configs['activation_bits'] >= 1 and configs['activation_bits'] <= 16), \
        "activation_bits should be between 1 and 16."

    assert isinstance(configs['not_quant_pattern'], (list, str)), \
        "not_quant_pattern must be list or str"

    assert isinstance(configs['quantize_op_types'], list), \
        "quantize_op_types must be a list"

    if configs['for_tensorrt']:
        configs['quantize_op_types'] = TENSORRT_OP_TYPES
    elif configs['is_full_quantize']:
        configs[
            'quantize_op_types'] = TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    else:
        for op_type in configs['quantize_op_types']:
            assert (op_type in QUANT_DEQUANT_PASS_OP_TYPES) or (
                op_type in TRANSFORM_PASS_OP_TYPES), "{} is not supported, \
                        the supported op types are {}".format(
                    op_type,
                    TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES)

    assert isinstance(configs['dtype'], str), \
        "dtype must be a str."

    assert (configs['dtype'] in VALID_DTYPES), \
        "dtype can only be " + " ".join(VALID_DTYPES)

    assert isinstance(configs['window_size'], int), \
        "window_size must be an int value; it is the window size for 'range_abs_max' quantization, default is 10000."

    assert isinstance(configs['moving_rate'], float), \
        "moving_rate must be a float value; it is the decay coefficient of the moving average, default is 0.9."

    return configs


def quant_aware(program, place, config=None, scope=None, for_test=False):
    """
    Add trainable quantization ops to the program.
    Args:
        program(fluid.Program): the program to be quantized.
        place(fluid.CPUPlace or fluid.CUDAPlace): the CPU or CUDA device.
        config(dict, optional): configs for quantization. If None, the default config is used. Default is None.
        scope(fluid.Scope, optional): the scope that stores the variables; it should be the program's scope.
            If None, fluid.global_scope() is used. Default is None.
        for_test(bool): set True if the program is a test (inference) program, False if it is a training program. Default is False.
    Return:
        fluid.Program: the program with quantization ops inserted; the user can fine-tune it to improve accuracy.
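
    Examples:
        A minimal usage sketch. It assumes train_prog is an existing
        fluid.Program that builds the model to be quantized (the program and
        config names here are illustrative):

            import paddle.fluid as fluid

            place = fluid.CPUPlace()
            config = {
                'weight_quantize_type': 'channel_wise_abs_max',
                'activation_quantize_type': 'moving_average_abs_max',
            }
            quant_train_prog = quant_aware(
                train_prog, place, config=config, for_test=False)
            # fine-tune quant_train_prog as usual, then call convert()
            # on the test program for inference.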
    """

    scope = fluid.global_scope() if not scope else scope
    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("quant_aware config {}".format(config))

    main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)

    transform_pass_ops = []
    quant_dequant_ops = []
    for op_type in config['quantize_op_types']:
        if op_type in TRANSFORM_PASS_OP_TYPES:
            transform_pass_ops.append(op_type)
        elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
            quant_dequant_ops.append(op_type)
    if len(transform_pass_ops) > 0:
        transform_pass = QuantizationTransformPass(
            scope=scope,
            place=place,
            weight_bits=config['weight_bits'],
            activation_bits=config['activation_bits'],
            activation_quantize_type=config['activation_quantize_type'],
            weight_quantize_type=config['weight_quantize_type'],
            window_size=config['window_size'],
            moving_rate=config['moving_rate'],
            quantizable_op_type=transform_pass_ops,
            skip_pattern=config['not_quant_pattern'])

        transform_pass.apply(main_graph)

    if len(quant_dequant_ops) > 0:
        quant_dequant_pass = AddQuantDequantPass(
            scope=scope,
            place=place,
            moving_rate=config['moving_rate'],
            quant_bits=config['activation_bits'],
            skip_pattern=config['not_quant_pattern'],
            quantizable_op_type=quant_dequant_ops)
        quant_dequant_pass.apply(main_graph)

    if for_test:
        quant_program = main_graph.to_program()
    else:
        quant_program = fluid.CompiledProgram(main_graph.graph)
    return quant_program


def quant_post(executor,
               model_dir,
               quantize_model_path,
               sample_generator,
               model_filename=None,
               params_filename=None,
               batch_size=16,
               batch_nums=None,
               scope=None,
               algo='KL',
               quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
               is_full_quantize=False,
               is_use_cache_file=False,
               cache_dir="./temp_post_training"):
    """
    This function uses the post-training quantization method to quantize the
    fp32 model. It uses calibration data to compute the scale factors of the
    quantized variables and inserts fake quant/dequant ops to obtain the
    quantized model.

    Args:
        executor(fluid.Executor): The executor to load, run and save the 
            quantized model.
        model_dir(str): The path of the fp32 model to be quantized; the model
            and params saved by fluid.io.save_inference_model are under
            this path.
        quantize_model_path(str): The path to save quantized model using api
            fluid.io.save_inference_model.
        sample_generator(Python Generator): The sample generator provides
            calibration data for the DataLoader, and it yields one sample at a time.
        model_filename(str, optional): The name of model file. If parameters 
            are saved in separate files, set it as 'None'. Default is 'None'.
        params_filename(str, optional): The name of params file.
                When all parameters are saved in a single file, set it 
                as filename. If parameters are saved in separate files, 
                set it as 'None'. Default is 'None'.
        batch_size(int, optional): The batch size of DataLoader, default is 16.
        batch_nums(int, optional): If batch_nums is not None, the number of calibration
                        samples is batch_size * batch_nums. If batch_nums is None, all data
                        generated by sample_generator is used as calibration data.
        scope(fluid.Scope, optional): The scope in which to run the program; it is used to load
                        and save variables. If scope is None, fluid.global_scope() is used.
        algo(str, optional): If algo='KL', use the KL-divergence method to
                        get more precise scale factors. If algo='direct', use the
                        abs_max method to get the scale factor. Default is 'KL'.
        quantizable_op_type(list[str], optional): The list of op types
                        that will be quantized. Default is ["conv2d", "depthwise_conv2d", 
                        "mul"].
        is_full_quantize(bool): If True, apply quantization to all supported quantizable op types.
                        If False, only apply quantization to the op types in quantizable_op_type. Default is False.
        is_use_cache_file(bool): If False, all temp data will be saved in memory. If True,
                                all temp data will be saved to disk. Default is False.
        cache_dir(str): When 'is_use_cache_file' is True, temp data will be saved in 'cache_dir'. Default is './temp_post_training'.
    Returns:
        None
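
    Examples:
        A minimal sketch of post-training quantization. It assumes an fp32
        inference model has already been saved under './fp32_model' by
        fluid.io.save_inference_model, and that sample_reader yields one
        calibration sample at a time (both names are illustrative):

            import paddle.fluid as fluid

            exe = fluid.Executor(fluid.CPUPlace())
            quant_post(
                executor=exe,
                model_dir='./fp32_model',
                quantize_model_path='./quant_model',
                sample_generator=sample_reader,
                batch_size=16,
                batch_nums=10)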
    """
    post_training_quantization = PostTrainingQuantization(
        executor=executor,
        sample_generator=sample_generator,
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename,
        batch_size=batch_size,
        batch_nums=batch_nums,
        scope=scope,
        algo=algo,
        quantizable_op_type=quantizable_op_type,
        is_full_quantize=is_full_quantize,
        is_use_cache_file=is_use_cache_file,
        cache_dir=cache_dir)
    post_training_quantization.quantize()
    post_training_quantization.save_quantized_model(quantize_model_path)


def convert(program, place, config=None, scope=None, save_int8=False):
    """
    Change the order of the quantization ops in the program and return a
    program that can be used by Paddle-Lite.
    Args:
        program(fluid.Program): the program returned by quant_aware.
        place(fluid.CPUPlace or fluid.CUDAPlace): the CPU or CUDA device.
        scope(fluid.Scope, optional): the scope that stores the variables; it should be the program's scope.
            If None, fluid.global_scope() is used. Default is None.
        config(dict, optional): configs for convert. If None, the default config is used. Default is None.
                It must be the same config as the one used in 'quant_aware'.
        save_int8(bool): whether to also return an int8 freezed program. The int8 program can only be used
                to check the size of the model weights; it cannot be used for inference in Fluid or Paddle-Lite.
    Return:
        freezed_program(fluid.Program): the freezed program, which can be used for inference.
            Its parameters are of float32 type, but their values lie in the int8 range.
        freezed_program_int8(fluid.Program): the freezed int8 program.
        When save_int8 is False, only freezed_program is returned.
        When save_int8 is True, both freezed_program and freezed_program_int8 are returned.
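
    Examples:
        A minimal sketch that follows quant_aware. It assumes quant_test_prog
        was produced by quant_aware(..., for_test=True) and config is the same
        dict that was passed to quant_aware (names are illustrative):

            import paddle.fluid as fluid

            place = fluid.CPUPlace()
            freezed_prog = convert(quant_test_prog, place, config=config)
            # freezed_prog can now be saved with fluid.io.save_inference_model
            # for deployment, e.g. with Paddle-Lite.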
    """
    scope = fluid.global_scope() if not scope else scope

    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("convert config {}".format(config))

    test_graph = IrGraph(core.Graph(program.desc), for_test=True)
    support_op_types = []
    for op in config['quantize_op_types']:
        if op in QuantizationFreezePass._supported_quantizable_op_type:
            support_op_types.append(op)

    # Freeze the graph after training by adjusting the quantize
    # operators' order for the inference.
    freeze_pass = QuantizationFreezePass(
        scope=scope,
        place=place,
        weight_bits=config['weight_bits'],
        activation_bits=config['activation_bits'],
        weight_quantize_type=config['weight_quantize_type'],
        quantizable_op_type=support_op_types)
    freeze_pass.apply(test_graph)
    freezed_program = test_graph.to_program()

    if save_int8:
        convert_int8_pass = ConvertToInt8Pass(
            scope=fluid.global_scope(),
            place=place,
            quantizable_op_type=support_op_types)
        convert_int8_pass.apply(test_graph)
        freezed_program_int8 = test_graph.to_program()
        return freezed_program, freezed_program_int8
    else:
        return freezed_program