# Copyright (c) 2019  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid import core

# module-level logger used by quant_aware() and convert()
_logger = logging.getLogger(__name__)

WEIGHT_QUANTIZATION_TYPES = [
    'abs_max', 'channel_wise_abs_max', 'range_abs_max',
    'moving_average_abs_max'
]
ACTIVATION_QUANTIZATION_TYPES = [
    'abs_max', 'range_abs_max', 'moving_average_abs_max'
]
VALID_DTYPES = ['int8']

_quant_config_default = {
    # weight quantize type, default is 'abs_max'
    'weight_quantize_type': 'abs_max',
    # activation quantize type, default is 'abs_max'
    'activation_quantize_type': 'abs_max',
    # weight quantize bit num, default is 8
    'weight_bits': 8,
    # activation quantize bit num, default is 8
    'activation_bits': 8,
    # ops of name_scope in not_quant_pattern list, will not be quantized
    'not_quant_pattern': ['skip_quant'],
    # ops of type in quantize_op_types, will be quantized
    'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
    # data type after quantization, such as 'int8'; currently only 'int8' is supported
    'dtype': 'int8',
    # window size for 'range_abs_max' quantization, default is 10000
    'window_size': 10000,
    # the decay coefficient of moving average, default is 0.9
    'moving_rate': 0.9,
    # if quant_weight_only is True, only the weights of the quantizable ops are
    # quantized and activations are left unquantized
    'quant_weight_only': False,
}
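
# A minimal sketch (not part of the original module) of how a user config is
# merged with the defaults above; 'user_config' and the 'last_fc' name scope
# are illustrative only:
#
#     user_config = {
#         'weight_quantize_type': 'channel_wise_abs_max',
#         'not_quant_pattern': ['skip_quant', 'last_fc'],
#     }
#     configs = _parse_configs(user_config)
#     # Every key not present in user_config keeps its default value,
#     # e.g. configs['weight_bits'] is still 8.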


def _parse_configs(user_config):
    """
58
    check user configs is valid, and set default value if user not config.
F
ftian 已提交
59
    Args:
60
        user_config(dict):the config of user.
F
ftian 已提交
61 62 63 64 65 66 67
    Return:
        configs(dict): final configs will be used.
    """

    configs = copy.deepcopy(_quant_config_default)
    configs.update(user_config)

    # check whether the configs are valid
    assert configs['weight_quantize_type'] in WEIGHT_QUANTIZATION_TYPES, \
        "Unknown weight_quantize_type: '%s'. It can only be %s." % (
            configs['weight_quantize_type'],
            " ".join(WEIGHT_QUANTIZATION_TYPES))

    assert configs['activation_quantize_type'] in ACTIVATION_QUANTIZATION_TYPES, \
        "Unknown activation_quantize_type: '%s'. It can only be %s." % (
            configs['activation_quantize_type'],
            " ".join(ACTIVATION_QUANTIZATION_TYPES))

    assert isinstance(configs['weight_bits'], int), \
        "weight_bits must be int value."

    assert (configs['weight_bits'] >= 1 and configs['weight_bits'] <= 16), \
        "weight_bits should be between 1 and 16."

    assert isinstance(configs['activation_bits'], int), \
        "activation_bits must be int value."

    assert (configs['activation_bits'] >= 1 and configs['activation_bits'] <= 16), \
        "activation_bits should be between 1 and 16."

    assert isinstance(configs['not_quant_pattern'], list), \
        "not_quant_pattern must be a list"

    assert isinstance(configs['quantize_op_types'], list), \
        "quantize_op_types must be a list"

    assert isinstance(configs['dtype'], str), \
        "dtype must be a str."

    assert (configs['dtype'] in VALID_DTYPES), \
        "dtype can only be " + " ".join(VALID_DTYPES)

    assert isinstance(configs['window_size'], int), \
        "window_size must be an int value; it is the window size for 'range_abs_max' quantization, default is 10000."

    assert isinstance(configs['moving_rate'], float), \
        "moving_rate must be a float value; it is the decay coefficient of the moving average, default is 0.9."

    assert isinstance(configs['quant_weight_only'], bool), \
        "quant_weight_only must be a bool value. If quant_weight_only is True, " \
        "only the weights of the layers to be quantized are quantized, " \
        "and activations are left unquantized."

    return configs


def quant_aware(program, place, config=None, scope=None, for_test=False):
    """
    add trainable quantization ops in program.
F
ftian 已提交
116
    Args:
117 118 119 120 121 122 123
        program(fluid.Program): program
        scope(fluid.Scope): the scope to store var, it's should be the value of program's scope, usually it's fluid.global_scope().
        place(fluid.CPUPlace or fluid.CUDAPlace): place
        config(dict): configs for quantization, default values are in quant_config_default dict.
        for_test: if program is test program, for_test should be set True, else False.
    Return:
        fluid.Program: user can finetune this quantization program to enhance the accuracy.
F
ftian 已提交
124 125 126
    """

    scope = fluid.global_scope() if not scope else scope
    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
        _logger.info("quant_aware config {}".format(config))

    main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)

    transform_pass = QuantizationTransformPass(
        scope=scope,
        place=place,
        weight_bits=config['weight_bits'],
        activation_bits=config['activation_bits'],
        activation_quantize_type=config['activation_quantize_type'],
        weight_quantize_type=config['weight_quantize_type'],
        window_size=config['window_size'],
        moving_rate=config['moving_rate'],
        quantizable_op_type=config['quantize_op_types'],
        skip_pattern=config['not_quant_pattern'])

    transform_pass.apply(main_graph)

    if for_test:
        quant_program = main_graph.to_program()
    else:
        quant_program = fluid.CompiledProgram(main_graph.graph)
    return quant_program
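
# Illustrative usage sketch for quant_aware (train_prog, test_prog, use_gpu and
# config are assumed to be defined by the caller; they are not part of this
# module):
#
#     place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
#     quant_train_prog = quant_aware(train_prog, place, config, for_test=False)
#     quant_test_prog = quant_aware(test_prog, place, config, for_test=True)
#     # Fine-tune quant_train_prog with a normal fluid.Executor training loop,
#     # then evaluate with quant_test_prog before calling convert() below.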


def convert(program, place, config=None, scope=None, save_int8=False):
    """
159
    add quantization ops in program. the program returned is not trainable.
F
ftian 已提交
160
    Args:
161 162 163 164 165 166 167 168 169 170
        program(fluid.Program): program
        scope(fluid.Scope): the scope to store var, when is None will use fluid.global_scope()
        place(fluid.CPUPlace or fluid.CUDAPlace): place
        config(dict): configs for quantization, default values are in quant_config_default dict.
        save_int8: is export int8 freezed program.
    Return:
        fluid.Program: freezed program which can be used for inference.
                       parameters is float32 type, but it's value in int8 range.
        fluid.Program: freezed int8 program which can be used for inference.
                       if save_int8 is False, this value is None.
F
ftian 已提交
171
    """
    scope = fluid.global_scope() if not scope else scope

    if config is None:
        config = _quant_config_default
    else:
        assert isinstance(config, dict), "config must be dict"
        config = _parse_configs(config)
    _logger.info("convert config {}".format(config))

    test_graph = IrGraph(core.Graph(program.desc), for_test=True)

    # Freeze the graph after training by adjusting the quantize
    # operators' order for the inference.
    freeze_pass = QuantizationFreezePass(
        scope=scope,
        place=place,
        weight_quantize_type=config['weight_quantize_type'])
    freeze_pass.apply(test_graph)
    freezed_program = test_graph.to_program()

    if save_int8:
        convert_int8_pass = ConvertToInt8Pass(
            scope=scope, place=place)
        convert_int8_pass.apply(test_graph)
        freezed_program_int8 = test_graph.to_program()
        return freezed_program, freezed_program_int8
    else:
        return freezed_program
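
# Illustrative usage sketch for convert (quant_test_prog, place, config, exe,
# feed_names and fetch_targets are assumed to be defined by the caller):
#
#     float_prog, int8_prog = convert(
#         quant_test_prog, place, config, save_int8=True)
#     fluid.io.save_inference_model(
#         dirname='./quant_infer_model',
#         feeded_var_names=feed_names,
#         target_vars=fetch_targets,
#         executor=exe,
#         main_program=float_prog)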