# Copyright (c) 2019  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass
from paddle.fluid.contrib.slim.quantization import OutScaleForInferencePass
from paddle.fluid import core
from paddle.fluid.contrib.slim.quantization import WeightQuantization

from ..common import get_logger
_logger = get_logger(__name__, level=logging.INFO)

WEIGHT_QUANTIZATION_TYPES = [
    'abs_max', 'channel_wise_abs_max', 'range_abs_max',
    'moving_average_abs_max'
]
WEIGHT_QUANTIZATION_TYPES_TENSORRT = ['channel_wise_abs_max']

ACTIVATION_QUANTIZATION_TYPES = [
    'abs_max', 'range_abs_max', 'moving_average_abs_max'
]

ACTIVATION_QUANTIZATION_TYPES_TENSORRT = [
    'range_abs_max', 'moving_average_abs_max'
]

VALID_DTYPES = ['int8']
TRANSFORM_PASS_OP_TYPES = QuantizationTransformPass._supported_quantizable_op_type
QUANT_DEQUANT_PASS_OP_TYPES = AddQuantDequantPass._supported_quantizable_op_type

TENSORRT_OP_TYPES = [
    'mul', 'conv2d', 'pool2d', 'depthwise_conv2d', 'elementwise_add',
    'leaky_relu'
]

_quant_config_default = {
    # weight quantize type, default is 'channel_wise_abs_max'
    'weight_quantize_type': 'channel_wise_abs_max',
    # activation quantize type, default is 'moving_average_abs_max'
    'activation_quantize_type': 'moving_average_abs_max',
    # weight quantize bit num, default is 8
    'weight_bits': 8,
    # activation quantize bit num, default is 8
    'activation_bits': 8,
    # ops of name_scope in not_quant_pattern list, will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops of type in quantize_op_types, will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
    # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
    'window_size': 10000,
    # The decay coefficient of moving average, default is 0.9
    'moving_rate': 0.9,
    # if True, 'quantize_op_types' will be TENSORRT_OP_TYPES
    'for_tensorrt': False,
    # if True, 'quantize_op_types' will be TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    'is_full_quantize': False
}
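
# A minimal, illustrative sketch of a user-supplied config (not part of the
# original module): only keys that differ from _quant_config_default need to
# be provided; _parse_configs (defined below) merges them over the defaults
# and validates the result. The values shown are examples, not requirements.
#
#   user_config = {
#       'weight_quantize_type': 'abs_max',
#       'quantize_op_types': ['conv2d', 'depthwise_conv2d'],
#       'not_quant_pattern': ['skip_quant'],
#   }
#   config = _parse_configs(user_config)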


def _parse_configs(user_config):
    """
    check if user's configs are valid.
    Args:
        user_config(dict): user's config.
    Return:
        configs(dict): final configs will be used.
    """

    configs = copy.deepcopy(_quant_config_default)
    configs.update(user_config)

    assert isinstance(configs['for_tensorrt'], bool) and isinstance(
        configs['is_full_quantize'],
        bool), "'for_tensorrt' and 'is_full_quantize' must both be bool"

    # check if configs is valid
    if configs['for_tensorrt']:
        weight_types = WEIGHT_QUANTIZATION_TYPES_TENSORRT
        activation_types = ACTIVATION_QUANTIZATION_TYPES_TENSORRT
        platform = 'TensorRT'
    else:
        weight_types = WEIGHT_QUANTIZATION_TYPES
        activation_types = ACTIVATION_QUANTIZATION_TYPES
        platform = 'PaddleLite'
    assert configs['weight_quantize_type'] in weight_types, \
        "Unknown weight_quantize_type: {}. {} only supports {} ".format(configs['weight_quantize_type'],
                platform, weight_types)

    assert configs['activation_quantize_type'] in activation_types, \
        "Unknown activation_quantize_type: {}. {} only supports {}".format(configs['activation_quantize_type'],
                platform, activation_types)

    assert isinstance(configs['weight_bits'], int), \
        "weight_bits must be int value."

    assert (configs['weight_bits'] >= 1 and configs['weight_bits'] <= 16), \
        "weight_bits should be between 1 and 16."

    assert isinstance(configs['activation_bits'], int), \
        "activation_bits must be int value."

    assert (configs['activation_bits'] >= 1 and configs['activation_bits'] <= 16), \
        "activation_bits should be between 1 and 16."

    assert isinstance(configs['not_quant_pattern'], (list, str)), \
        "not_quant_pattern must be list or str"

    assert isinstance(configs['quantize_op_types'], list), \
        "quantize_op_types must be a list"

    if configs['for_tensorrt']:
        configs['quantize_op_types'] = TENSORRT_OP_TYPES
    elif configs['is_full_quantize']:
        configs[
            'quantize_op_types'] = TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    else:
        for op_type in configs['quantize_op_types']:
            assert (op_type in QUANT_DEQUANT_PASS_OP_TYPES) or (
                op_type in TRANSFORM_PASS_OP_TYPES), "{} is not supported, \
                        the supported op types are {}".format(
                    op_type,
                    TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES)

    assert isinstance(configs['dtype'], str), \
        "dtype must be a str."

    assert (configs['dtype'] in VALID_DTYPES), \
        "dtype can only be " + " ".join(VALID_DTYPES)

    assert isinstance(configs['window_size'], int), \
        "window_size must be an int value; it is the window size for 'range_abs_max' quantization, default is 10000."

    assert isinstance(configs['moving_rate'], float), \
        "moving_rate must be a float value; it is the decay coefficient of the moving average, default is 0.9."

    return configs


def quant_aware(program, place, config=None, scope=None, for_test=False):
    """Add quantization and dequantization operators to ``program``
    for quantization training or testing.

    Args:
        program(fluid.Program): training or testing ``program``.
        place(fluid.CPUPlace or fluid.CUDAPlace): This parameter represents 
            the executor run on which device.
        config(dict, optional): configs for quantization. if None, will use default config. 
            Default: None.
        scope(fluid.Scope): Scope records the mapping between variable names and variables, 
            similar to brackets in programming languages. Usually users can use 
            `fluid.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_. When ``None``, `fluid.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_ will be used. Default: ``None``.
        for_test(bool): If the 'program' parameter is a test program, this parameter should be set to ``True``. 
            Otherwise, set to ``False``. Default: False.
    
    Returns:
        fluid.CompiledProgram | fluid.Program: Program with quantization and dequantization ``operators``
    """

    scope = fluid.global_scope() if not scope else scope
    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("quant_aware config {}".format(config))

    main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)

    transform_pass_ops = []
    quant_dequant_ops = []
    for op_type in config['quantize_op_types']:
        if op_type in TRANSFORM_PASS_OP_TYPES:
            transform_pass_ops.append(op_type)
        elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
            quant_dequant_ops.append(op_type)
    if len(transform_pass_ops) > 0:
        transform_pass = QuantizationTransformPass(
            scope=scope,
            place=place,
            weight_bits=config['weight_bits'],
            activation_bits=config['activation_bits'],
            activation_quantize_type=config['activation_quantize_type'],
            weight_quantize_type=config['weight_quantize_type'],
            window_size=config['window_size'],
            moving_rate=config['moving_rate'],
            quantizable_op_type=transform_pass_ops,
            skip_pattern=config['not_quant_pattern'])

        transform_pass.apply(main_graph)

    if len(quant_dequant_ops) > 0:
        quant_dequant_pass = AddQuantDequantPass(
            scope=scope,
            place=place,
            moving_rate=config['moving_rate'],
            quant_bits=config['activation_bits'],
            skip_pattern=config['not_quant_pattern'],
            quantizable_op_type=quant_dequant_ops)
        quant_dequant_pass.apply(main_graph)

    out_scale_training_pass = OutScaleForTrainingPass(
        scope=scope, place=place, moving_rate=config['moving_rate'])
    out_scale_training_pass.apply(main_graph)

    if for_test:
        quant_program = main_graph.to_program()
    else:
        quant_program = fluid.CompiledProgram(main_graph.graph)
    return quant_program
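
# A minimal usage sketch for quant_aware (illustrative only; it is not part of
# the original file). train_program/test_program, the data feed and the
# optimizer are assumed to be built by the caller.
#
#   import paddle.fluid as fluid
#   place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
#   quant_train_program = quant_aware(train_program, place, config=None, for_test=False)
#   quant_test_program = quant_aware(test_program, place, config=None, for_test=True)
#   # quant_train_program is a fluid.CompiledProgram; feed it to Executor.run()
#   # for quantization-aware fine-tuning, then pass quant_test_program to convert().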


def quant_post(executor,
               model_dir,
               quantize_model_path,
               batch_generator=None,
               sample_generator=None,
               model_filename=None,
               params_filename=None,
               save_model_filename='__model__',
               save_params_filename='__params__',
               batch_size=16,
               batch_nums=None,
               scope=None,
               algo='KL',
               quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
               is_full_quantize=False,
               weight_bits=8,
               activation_bits=8,
               activation_quantize_type='range_abs_max',
               weight_quantize_type='channel_wise_abs_max',
               is_use_cache_file=False,
               cache_dir="./temp_post_training"):
    """
    The function utilizes the post training quantization method to quantize the
    fp32 model. It uses calibration data to calculate the scale factors of
    quantized variables, and inserts fake quantization and dequantization 
    operators to obtain the quantized model.

    Args:
        executor(fluid.Executor): The executor to load, run and save the 
            quantized model.
        model_dir(str): The path of fp32 model that will be quantized, and 
            the model and params that saved by ``fluid.io.save_inference_model`` 
            are under the path.
        quantize_model_path(str): The path to save quantized model using api
            ``fluid.io.save_inference_model``.
        batch_generator(Python Generator): The batch generator provides 
                calibration data for DataLoader, and it returns a batch every
                time. Only one of sample_generator and batch_generator
                can be set. Besides, batch_generator supports LoD tensors.
        sample_generator(Python Generator): The sample generator provides 
            calibration data for DataLoader, and it only returns a sample every time.
        model_filename(str, optional): The name of model file. If parameters 
            are saved in separate files, set it as 'None'. Default: 'None'.
        params_filename(str, optional): The name of params file.
                When all parameters are saved in a single file, set it 
                as filename. If parameters are saved in separate files, 
                set it as 'None'. Default : 'None'.
        save_model_filename(str): The name of model file to save the quantized inference program.  Default: '__model__'.
        save_params_filename(str): The name of file to save all related parameters. 
                If it is set None, parameters will be saved in separate files. Default: '__params__'.
        batch_size(int, optional): The batch size of DataLoader, default is 16.
        batch_nums(int, optional): If batch_nums is not None, the number of calibration
                        data is 'batch_size*batch_nums'. If batch_nums is None, use all data
                        generated by sample_generator as calibration data.
        scope(fluid.Scope, optional): The scope to run program, use it to load 
                        and save variables. If scope is None, will use fluid.global_scope().
        algo(str, optional): If algo='KL', use the KL-divergence method to
                        get a more precise scale factor. If algo='direct', use
                        abs_max method to get the scale factor. Default: 'KL'.
        quantizable_op_type(list[str], optional): The list of op types
                        that will be quantized. Default: ["conv2d", "depthwise_conv2d", 
                        "mul"].
        weight_bits(int, optional): quantization bit number for weights.
        activation_bits(int): quantization bit number for activation.
        activation_quantize_type(str): quantization type for activation,
                now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
                This parameter only specifies the fake ops in quantized model.
                If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
                obtained by post training quantization in fake ops. If it
                is 'abs_max', the scale will not be saved in fake ops.
        weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. Compared to 'abs_max',
                the model accuracy is usually higher when using 'channel_wise_abs_max'.
        is_full_quantize(bool): if True, apply quantization to all supported quantizable op type.
                        If False, only apply quantization to the input quantizable_op_type. Default is False.
        is_use_cache_file(bool): If False, all temp data will be saved in memory. If True,
                                all temp data will be saved to disk. Default: False.
        cache_dir(str): When 'is_use_cache_file' is True, temp data will be save in 'cache_dir'. Default is './temp_post_training'.
    
    Returns:
        None
    """
    post_training_quantization = PostTrainingQuantization(
        executor=executor,
        sample_generator=sample_generator,
        batch_generator=batch_generator,
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename,
        batch_size=batch_size,
        batch_nums=batch_nums,
        scope=scope,
        algo=algo,
        quantizable_op_type=quantizable_op_type,
        is_full_quantize=is_full_quantize,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        activation_quantize_type=activation_quantize_type,
        weight_quantize_type=weight_quantize_type,
        is_use_cache_file=is_use_cache_file,
        cache_dir=cache_dir)
    post_training_quantization.quantize()
    post_training_quantization.save_quantized_model(
        quantize_model_path,
        model_filename=save_model_filename,
        params_filename=save_params_filename)
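
# A minimal usage sketch for quant_post (illustrative only; the paths and the
# calibration sample_generator below are placeholders supplied by the caller).
#
#   exe = fluid.Executor(fluid.CPUPlace())
#   quant_post(
#       executor=exe,
#       model_dir='./fp32_inference_model',
#       quantize_model_path='./quant_post_model',
#       sample_generator=calib_sample_generator,
#       batch_size=16,
#       batch_nums=10)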


def convert(program, place, config=None, scope=None, save_int8=False):
    """
    Convert a quantized and well-trained ``program`` to a final quantized ``program`` that can be used to save an ``inference model``.
    
    Args:
        program(fluid.Program): quantized and well-trained ``test program``.
        place(fluid.CPUPlace or fluid.CUDAPlace): This parameter represents the executor run on which device.
        config(dict, optional): configs for convert. if set None, will use default config. 
            It must be same with config that used in 'quant_aware'. Default: None.
        scope(fluid.Scope, optional):  Scope records the mapping between variable names and variables, 
            similar to brackets in programming languages. Usually users can use 
            `fluid.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_. When ``None``, `fluid.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_ will be used. Default: ``None``.
        save_int8(bool): Whether to also return a ``program`` whose model parameters' dtype is ``int8``.
            This parameter can only be used to get model size. Default: ``False``.

    Returns:
        Tuple: frozen program which can be used for inference.
        When ``save_int8`` is False, return ``freezed_program(fluid.Program)``.
        When ``save_int8`` is True, return ``freezed_program(fluid.Program)`` and ``freezed_program_int8(fluid.Program)``.
    """
    scope = fluid.global_scope() if not scope else scope

    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("convert config {}".format(config))
    test_graph = IrGraph(core.Graph(program.desc), for_test=True)

    out_scale_infer_pass = OutScaleForInferencePass(scope=scope)
    out_scale_infer_pass.apply(test_graph)

    # Freeze the graph after training by adjusting the quantize
    # operators' order for the inference.
    freeze_pass = QuantizationFreezePass(
        scope=scope,
        place=place,
        weight_bits=config['weight_bits'],
        activation_bits=config['activation_bits'],
        weight_quantize_type=config['weight_quantize_type'])

    freeze_pass.apply(test_graph)
    freezed_program = test_graph.to_program()

    if save_int8:
        convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
        convert_int8_pass.apply(test_graph)
        freezed_program_int8 = test_graph.to_program()
        return freezed_program, freezed_program_int8
    else:
        return freezed_program
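
# A minimal usage sketch for convert (illustrative only). quant_test_program is
# assumed to be the test program returned by quant_aware(..., for_test=True)
# after fine-tuning; feed_names, fetch_targets and exe are the caller's own objects.
#
#   inference_program = convert(quant_test_program, place, config=config)
#   fluid.io.save_inference_model(
#       dirname='./quant_inference_model',
#       feeded_var_names=feed_names,
#       target_vars=fetch_targets,
#       executor=exe,
#       main_program=inference_program)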


def quant_post_only_weight(model_dir,
                           save_model_dir,
                           model_filename=None,
                           params_filename=None,
                           save_model_filename=None,
                           save_params_filename=None,
                           quantizable_op_type=["conv2d", "mul"],
                           weight_bits=8,
                           generate_test_model=False):
    '''
    In order to reduce the size of the model, this API quantizes the weight
    of some ops from float32 to int8/16. In the inference stage, the 
    quantized weight will be dequantized to float32 again.
        
    Args:
        model_dir(str): The path of the fp32 model that will be quantized,
                    and the model and params files are under the path.
        save_model_dir(str): The path to save the quantized model.
        model_filename(str, optional): The name of file used to load the inference
                    program. If it is None, the default filename '__model__' will be used. Default is 'None'.
        params_filename(str, optional): The name of file used to load all parameters. When all parameters were saved 
                in a single binary file, set it as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
        save_model_filename(str, optional): The name of file to 
                save the inference program. If it is None, the default 
                filename '__model__' will be used. Default is 'None'.
        save_params_filename(str, optional): The name of file to 
                save all parameters. If it is None, parameters will be
                saved in separate files. If it is not None, all
                parameters will be saved in a single binary file. Default is 'None'.
        quantizable_op_type(list[str], optional): The list of ops 
                that will be quantized, and the quantized ops should be
                contained in ["conv2d", "depthwise_conv2d", "mul"]. 
                Default is ["conv2d", "mul"].
        weight_bits(int, optional): The bits for the quantized weight, 
                and it should be 8 or 16. Default is 8.
        generate_test_model(bool, optional): If generate_test_model
                is True, it saves a fake quantized model, in which the weights
                are quantized and dequantized. We can use PaddlePaddle to load
                the fake quantized model and test its accuracy on GPU or CPU. Default is False.
    '''

    weight_quant = WeightQuantization(
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename)
    weight_quant.quantize_weight_to_int(
        save_model_dir=save_model_dir,
        save_model_filename=save_model_filename,
        save_params_filename=save_params_filename,
        quantizable_op_type=quantizable_op_type,
        weight_bits=weight_bits,
        generate_test_model=generate_test_model)
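
# A minimal usage sketch for quant_post_only_weight (illustrative only; the
# directories are placeholders).
#
#   quant_post_only_weight(
#       model_dir='./fp32_inference_model',
#       save_model_dir='./weight_quant_model',
#       quantizable_op_type=['conv2d', 'mul'],
#       weight_bits=8,
#       generate_test_model=False)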