# Copyright (c) 2019  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
from paddle.fluid.contrib.slim.quantization import WeightQuantization

from ..common import get_logger
_logger = get_logger(__name__, level=logging.INFO)

WEIGHT_QUANTIZATION_TYPES = [
    'abs_max', 'channel_wise_abs_max', 'range_abs_max',
    'moving_average_abs_max'
]
WEIGHT_QUANTIZATION_TYPES_TENSORRT = ['channel_wise_abs_max']

ACTIVATION_QUANTIZATION_TYPES = [
    'abs_max', 'range_abs_max', 'moving_average_abs_max'
]

ACTIVATION_QUANTIZATION_TYPES_TENSORRT = [
    'range_abs_max', 'moving_average_abs_max'
]

VALID_DTYPES = ['int8']
TRANSFORM_PASS_OP_TYPES = QuantizationTransformPass._supported_quantizable_op_type
QUANT_DEQUANT_PASS_OP_TYPES = AddQuantDequantPass._supported_quantizable_op_type

TENSORRT_OP_TYPES = [
    'mul', 'conv2d', 'pool2d', 'depthwise_conv2d', 'elementwise_add',
    'leaky_relu'
]

_quant_config_default = {
    # weight quantize type, default is 'channel_wise_abs_max'
    'weight_quantize_type': 'channel_wise_abs_max',
    # activation quantize type, default is 'moving_average_abs_max'
    'activation_quantize_type': 'moving_average_abs_max',
    # weight quantize bit num, default is 8
    'weight_bits': 8,
    # activation quantize bit num, default is 8
    'activation_bits': 8,
    # ops of name_scope in not_quant_pattern list, will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops of type in quantize_op_types, will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
    # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
    'window_size': 10000,
    # The decay coefficient of moving average, default is 0.9
    'moving_rate': 0.9,
    # if True, 'quantize_op_types' will be TENSORRT_OP_TYPES
    'for_tensorrt': False,
    # if True, 'quantize_op_types' will be TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    'is_full_quantize': False
}
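
# Example user config (illustrative only): any key that is omitted keeps its default,
# because _parse_configs copies _quant_config_default and then updates it with the
# user dict. 'image_branch' is a hypothetical name_scope used purely for demonstration.
#
#     user_config = {
#         'weight_quantize_type': 'abs_max',
#         'not_quant_pattern': ['skip_quant', 'image_branch'],
#         'quantize_op_types': ['conv2d', 'depthwise_conv2d'],
#     }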


def _parse_configs(user_config):
    """
    check if user's configs are valid.
    Args:
        user_config(dict): user's config.
    Return:
        configs(dict): final configs will be used.
    """

    configs = copy.deepcopy(_quant_config_default)
    configs.update(user_config)

    assert isinstance(configs['for_tensorrt'], bool) and isinstance(
        configs['is_full_quantize'],
        bool), "'for_tensorrt' and 'is_full_quantize' must both be bool'"

    # check if configs is valid
    if configs['for_tensorrt']:
        weight_types = WEIGHT_QUANTIZATION_TYPES_TENSORRT
        activation_types = ACTIVATION_QUANTIZATION_TYPES_TENSORRT
        platform = 'TensorRT'
    else:
        weight_types = WEIGHT_QUANTIZATION_TYPES
        activation_types = ACTIVATION_QUANTIZATION_TYPES
        platform = 'PaddleLite'
    assert configs['weight_quantize_type'] in weight_types, \
        "Unknown weight_quantize_type: {}. {} only supports {} ".format(configs['weight_quantize_type'],
                platform, weight_types)

    assert configs['activation_quantize_type'] in activation_types, \
        "Unknown activation_quantize_type: {}. {} only supports {}".format(configs['activation_quantize_type'],
                platform, activation_types)

    assert isinstance(configs['weight_bits'], int), \
        "weight_bits must be int value."

    assert (configs['weight_bits'] >= 1 and configs['weight_bits'] <= 16), \
        "weight_bits should be between 1 and 16."

    assert isinstance(configs['activation_bits'], int), \
        "activation_bits must be int value."

    assert (configs['activation_bits'] >= 1 and configs['activation_bits'] <= 16), \
        "activation_bits should be between 1 and 16."

    assert isinstance(configs['not_quant_pattern'], (list, str)), \
        "not_quant_pattern must be list or str"

    assert isinstance(configs['quantize_op_types'], list), \
        "quantize_op_types must be a list"

    if configs['for_tensorrt']:
        configs['quantize_op_types'] = TENSORRT_OP_TYPES
    elif configs['is_full_quantize']:
        configs[
            'quantize_op_types'] = TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
    else:
        for op_type in configs['quantize_op_types']:
            assert (op_type in QUANT_DEQUANT_PASS_OP_TYPES) or (
                op_type in TRANSFORM_PASS_OP_TYPES), "{} is not supported, \
                        the supported op types are {}".format(
                    op_type,
                    TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES)

    assert isinstance(configs['dtype'], str), \
        "dtype must be a str."

    assert (configs['dtype'] in VALID_DTYPES), \
        "dtype can only be " + " ".join(VALID_DTYPES)

    assert isinstance(configs['window_size'], int), \
        "window_size must be int value, window size for 'range_abs_max' quantization, default is 10000."

    assert isinstance(configs['moving_rate'], float), \
        "moving_rate must be float value, The decay coefficient of moving average, default is 0.9."

    return configs


def quant_aware(program, place, config=None, scope=None, for_test=False):
    """Add quantization  and dequantization operators to "program" 
    for quantization training or testing.

    Args:
        program(fluid.Program): training or testing ``program``.
        place(fluid.CPUPlace or fluid.CUDAPlace): This parameter represents 
            the device on which the executor runs.
        config(dict, optional): configs for quantization. If None, the default config 
            will be used. Default: None.
        scope(fluid.Scope, optional): Scope records the mapping between variable names and variables, 
            similar to brackets in programming languages. Usually users can use 
            `fluid.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_. 
            When ``None``, `fluid.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_ will be used. Default: ``None``.
        for_test(bool): If the 'program' parameter is a test program, this parameter should be set to ``True``. 
            Otherwise, set it to ``False``. Default: False.
    
    Returns:
        fluid.CompiledProgram | fluid.Program: Program with quantization and dequantization ``operators``
    """

    scope = fluid.global_scope() if not scope else scope
    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("quant_aware config {}".format(config))

    main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)

    transform_pass_ops = []
    quant_dequant_ops = []
    for op_type in config['quantize_op_types']:
        if op_type in TRANSFORM_PASS_OP_TYPES:
            transform_pass_ops.append(op_type)
        elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
            quant_dequant_ops.append(op_type)
    if len(transform_pass_ops) > 0:
        transform_pass = QuantizationTransformPass(
            scope=scope,
            place=place,
            weight_bits=config['weight_bits'],
            activation_bits=config['activation_bits'],
            activation_quantize_type=config['activation_quantize_type'],
            weight_quantize_type=config['weight_quantize_type'],
            window_size=config['window_size'],
            moving_rate=config['moving_rate'],
            quantizable_op_type=transform_pass_ops,
            skip_pattern=config['not_quant_pattern'])

        transform_pass.apply(main_graph)

    if len(quant_dequant_ops) > 0:
        quant_dequant_pass = AddQuantDequantPass(
            scope=scope,
            place=place,
            moving_rate=config['moving_rate'],
            quant_bits=config['activation_bits'],
            skip_pattern=config['not_quant_pattern'],
            quantizable_op_type=quant_dequant_ops)
        quant_dequant_pass.apply(main_graph)

    if for_test:
        quant_program = main_graph.to_program()
    else:
        quant_program = fluid.CompiledProgram(main_graph.graph)
    return quant_program
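

# Illustrative usage sketch (not called anywhere in this module). It assumes a toy
# fully-connected network and CPU execution; all variable names below are hypothetical.
def _quant_aware_usage_sketch():
    import numpy as np

    train_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        image = fluid.layers.data(name='image', shape=[784], dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        out = fluid.layers.fc(input=image, size=10, act='softmax')
        loss = fluid.layers.mean(fluid.layers.cross_entropy(input=out, label=label))
        fluid.optimizer.SGD(learning_rate=0.001).minimize(loss)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    # Insert fake quantization/dequantization ops with the default config. Because
    # for_test is False, the result is a fluid.CompiledProgram for training.
    quant_train_prog = quant_aware(train_prog, place, config=None, for_test=False)

    # One toy training step on random data to show how the quantized program is run.
    feed = {
        'image': np.random.random((8, 784)).astype('float32'),
        'label': np.random.randint(0, 10, (8, 1)).astype('int64')
    }
    exe.run(quant_train_prog, feed=feed, fetch_list=[loss.name])
    return quant_train_prog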


def quant_post(executor,
               model_dir,
               quantize_model_path,
               batch_generator=None,
               sample_generator=None,
               model_filename=None,
               params_filename=None,
               save_model_filename='__model__',
               save_params_filename='__params__',
               batch_size=16,
               batch_nums=None,
               scope=None,
               algo='KL',
               quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
               is_full_quantize=False,
               weight_bits=8,
               activation_bits=8,
               activation_quantize_type='range_abs_max',
               weight_quantize_type='channel_wise_abs_max',
               is_use_cache_file=False,
               cache_dir="./temp_post_training"):
    """
    The function utilizes the post training quantization method to quantize the
    fp32 model. It uses calibrate data to calculate the scale factor of 
    quantized variables, and inserts fake quantization and dequantization 
    operators to obtain the quantized model.

    Args:
        executor(fluid.Executor): The executor to load, run and save the 
            quantized model.
        model_dir(str): The path of fp32 model that will be quantized, and 
            the model and params that saved by ``fluid.io.save_inference_model`` 
            are under the path.
        quantize_model_path(str): The path to save quantized model using api
            ``fluid.io.save_inference_model``.
        batch_generator(Python Generator): The batch generator provides 
                calibrate data for DataLoader, and it returns a batch every
                time. For sample_generator and batch_generator, only one
                can be set. Besides, batch_generator supports lod tensor.
        sample_generator(Python Generator): The sample generator provides 
            calibrate data for DataLoader, and it only returns a sample every time.
        model_filename(str, optional): The name of the model file. If it is None, 
            the default filename '__model__' will be used. Default: 'None'.
        params_filename(str, optional): The name of params file.
                When all parameters are saved in a single file, set it 
                as filename. If parameters are saved in separate files, 
                set it as 'None'. Default: 'None'.
        save_model_filename(str): The name of model file to save the quantized inference program.  Default: '__model__'.
        save_params_filename(str): The name of file to save all related parameters. 
                If it is set to None, parameters will be saved in separate files. Default: '__params__'.
        batch_size(int, optional): The batch size of DataLoader, default is 16.
        batch_nums(int, optional): If batch_nums is not None, the number of calibrate 
                        data is 'batch_size*batch_nums'. If batch_nums is None, use all data
                        generated by sample_generator  as calibrate data.
        scope(fluid.Scope, optional): The scope to run program, use it to load 
                        and save variables. If scope is None, will use fluid.global_scope().
        algo(str, optional): If algo='KL', use the KL-divergence method to
                        get the more precise scale factor. If algo='direct', use
                        abs_max method to get the scale factor. Default: 'KL'.
        quantizable_op_type(list[str], optional): The list of op types
                        that will be quantized. Default: ["conv2d", "depthwise_conv2d", 
                        "mul"].
        weight_bits(int, optional): quantization bit number for weights.
        activation_bits(int, optional): quantization bit number for activation.
        activation_quantize_type(str): quantization type for activation,
                now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
                This parameter only specifies the fake ops in quantized model.
                If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
                obtained by post training quantization in fake ops. If it
                is 'abs_max', the scale will not be saved in fake ops.
        weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. Compared to 'abs_max',
                the model accuracy is usually higher when using 'channel_wise_abs_max'.
        is_full_quantize(bool): if True, apply quantization to all supported quantizable op types.
                        If False, only apply quantization to the input quantizable_op_type. Default is False.
        is_use_cache_file(bool): If False, all temp data will be saved in memory. If True,
                                all temp data will be saved to disk. Default: False.
        cache_dir(str): When 'is_use_cache_file' is True, temp data will be saved in 'cache_dir'. Default is './temp_post_training'.
    
    Returns:
        None
    """
    post_training_quantization = PostTrainingQuantization(
        executor=executor,
        sample_generator=sample_generator,
        batch_generator=batch_generator,
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename,
        batch_size=batch_size,
        batch_nums=batch_nums,
        scope=scope,
        algo=algo,
        quantizable_op_type=quantizable_op_type,
        is_full_quantize=is_full_quantize,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        activation_quantize_type=activation_quantize_type,
        weight_quantize_type=weight_quantize_type,
        is_use_cache_file=is_use_cache_file,
        cache_dir=cache_dir)
    post_training_quantization.quantize()
    post_training_quantization.save_quantized_model(
        quantize_model_path,
        model_filename=save_model_filename,
        params_filename=save_params_filename)
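

# Illustrative usage sketch (not called anywhere in this module). The paths
# './fp32_model' and './quantized_model' are hypothetical, and the fp32 model is
# assumed to take a single 3x224x224 'image' input and to have been exported earlier
# with fluid.io.save_inference_model.
def _quant_post_usage_sketch():
    import numpy as np

    def sample_generator():
        # Random tensors standing in for real, preprocessed calibration images.
        for _ in range(32):
            yield [np.random.random((3, 224, 224)).astype('float32')]

    exe = fluid.Executor(fluid.CPUPlace())
    quant_post(
        executor=exe,
        model_dir='./fp32_model',
        quantize_model_path='./quantized_model',
        sample_generator=sample_generator,
        batch_size=8,
        batch_nums=4)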


def convert(program, place, config=None, scope=None, save_int8=False):
    """
    Convert the quantized and well-trained ``program`` to the final quantized ``program`` that can be used to save the ``inference model``.
    
    Args:
        program(fluid.Program): quantized and well-trained ``test program``.
        place(fluid.CPUPlace or fluid.CUDAPlace): This parameter represents the device on which the executor runs.
        config(dict, optional): configs for convert. If set to None, the default config will be used. 
            It must be the same config that was used in 'quant_aware'. Default: None.
        scope(fluid.Scope, optional): Scope records the mapping between variable names and variables, 
            similar to brackets in programming languages. Usually users can use 
            `fluid.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_. 
            When ``None``, `fluid.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_ will be used. Default: ``None``.
        save_int8(bool): Whether to also return a ``program`` whose model parameters are of dtype ``int8``. 
            This parameter can only be used to get model size. Default: ``False``.

    Returns:
        Tuple: the freezed program which can be used for inference.
        When ``save_int8`` is False, return ``freezed_program(fluid.Program)``.
        When ``save_int8`` is True, return ``freezed_program(fluid.Program)`` and ``freezed_program_int8(fluid.Program)``.
    """
    scope = fluid.global_scope() if not scope else scope

    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("convert config {}".format(config))

    test_graph = IrGraph(core.Graph(program.desc), for_test=True)

    # Freeze the graph after training by adjusting the quantize
    # operators' order for the inference.
    freeze_pass = QuantizationFreezePass(
        scope=scope,
        place=place,
        weight_bits=config['weight_bits'],
        activation_bits=config['activation_bits'],
        weight_quantize_type=config['weight_quantize_type'])

    freeze_pass.apply(test_graph)
    freezed_program = test_graph.to_program()

    if save_int8:
        convert_int8_pass = ConvertToInt8Pass(
            scope=scope,
            place=place,
            quantizable_op_type=config['quantize_op_types'])
        convert_int8_pass.apply(test_graph)
        freezed_program_int8 = test_graph.to_program()
        return freezed_program, freezed_program_int8
    else:
        return freezed_program
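

# Illustrative usage sketch (not called anywhere in this module). It assumes `val_prog`
# is a test-mode version of the network trained with the quant-aware program, and that
# `image`/`out` are its input and output variables; the save path is hypothetical.
def _convert_usage_sketch(val_prog, image, out):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Build the quantized test program with the same (default) config used in training.
    quant_val_prog = quant_aware(val_prog, place, config=None, for_test=True)

    # ... evaluate quant_val_prog here so the recorded activation scales are populated ...

    # Fold the fake quant/dequant ops into a program that can be saved for inference.
    inference_prog = convert(quant_val_prog, place, config=None)
    fluid.io.save_inference_model(
        dirname='./quant_inference_model',
        feeded_var_names=[image.name],
        target_vars=[out],
        executor=exe,
        main_program=inference_prog)
    return inference_prog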


def quant_post_only_weight(model_dir,
                           save_model_dir,
                           model_filename=None,
                           params_filename=None,
                           save_model_filename=None,
                           save_params_filename=None,
                           quantizable_op_type=["conv2d", "mul"],
                           weight_bits=8,
                           generate_test_model=False):
    '''
    In order to reduce the size of model, this api quantizes the weight
    of some ops from float32 to int8/16. In the inference stage, the 
    quantized weight will be dequantized to float32 again.
        
    Args:
        model_dir(str): The path of the fp32 model that will be quantized,
                    and the model and params files are under the path.
        save_model_dir(str): The path to save the quantized model.
        model_filename(str, optional): The name of file used to load the inference
                    program. If it is None, the default filename '__model__' will be used. Default is 'None'.
        params_filename(str, optional): The name of file used to load all parameters. When all parameters were saved 
                in a single binary file, set it as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
        save_model_filename(str, optional): The name of file to 
                save the inference program. If it is None, the default 
                filename '__model__' will be used. Default is 'None'.
        save_params_filename(str, optional): The name of file to 
                save all parameters. If it is None, parameters will be 
                saved in separate files. If it is not None, all 
                parameters will be saved in a single binary file. Default is 'None'.
        quantizable_op_type(list[str], optional): The list of ops 
                that will be quantized, and the quantized ops should be
                contained in ["conv2d", "depthwise_conv2d", "mul"]. 
                Default is ["conv2d", "depthwise_conv2d", "mul"].
        weight_bits(int, optional): The bits for the quantized weight, 
                and it should be 8 or 16. Default is 8.
        generate_test_model(bool, optional): If generate_test_model 
                is set to True, it saves a fake quantized model, in which the weights 
                are quantized and dequantized. We can use PaddlePaddle to load 
                the fake quantized model and test the accuracy on GPU or CPU. Default is False.
    '''

    weight_quant = WeightQuantization(
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename)
    weight_quant.quantize_weight_to_int(
        save_model_dir=save_model_dir,
        save_model_filename=save_model_filename,
        save_params_filename=save_params_filename,
        quantizable_op_type=quantizable_op_type,
        weight_bits=weight_bits,
        generate_test_model=generate_test_model)
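

# Illustrative usage sketch (not called anywhere in this module). Both directories are
# hypothetical, and the fp32 model is assumed to have been saved with
# fluid.io.save_inference_model using the default '__model__' filename and separate
# parameter files.
def _quant_post_only_weight_usage_sketch():
    quant_post_only_weight(
        model_dir='./fp32_model',
        save_model_dir='./weight_quant_model',
        quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'],
        weight_bits=8,
        generate_test_model=False)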