#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import math
import shutil
import logging
import numpy as np

try:
    from tqdm import tqdm
except ImportError:
    from .utils import tqdm

from inspect import isgeneratorfunction
from .... import io
from .... import core
from .... import reader
from .... import framework
from .... import unique_name
from ....executor import global_scope, Executor
from ....framework import IrGraph
from ....log_helper import get_logger
from .quantization_pass import QuantizationTransformPass, QuantizationTransformPassV2, QuantizationFreezePass, QuantWeightPass, AddQuantDequantPass, AddQuantDequantPassV2
from .cal_kl_threshold import cal_kl_threshold
from .adaround import run_adaround
from . import utils

__all__ = [
    'PostTrainingQuantization',
    'WeightQuantization',
    'PostTrainingQuantizationProgram',
]

_logger = get_logger(__name__,
                     logging.INFO,
                     fmt='%(asctime)s-%(levelname)s: %(message)s')


def _all_persistable_var_names(program):
    persistable_var_names = []
    for var in program.list_vars():
        if var.persistable:
            persistable_var_names.append(var.name)
    return persistable_var_names


def _remove_unused_var_nodes(graph):
    all_used_vars = set()
    ops = graph.all_op_nodes()
    for op_node in ops:
        for input_node in op_node.inputs:
            all_used_vars.add(input_node)
        for output_node in op_node.outputs:
            all_used_vars.add(output_node)

    all_used_vars = {n.node for n in all_used_vars}
    all_unused_vars = {
        n
        for n in filter(lambda node: node.node not in all_used_vars,
                        graph.all_var_nodes())
    }
    graph.safe_remove_nodes(all_unused_vars)
    return graph


def _remove_ctrl_vars(graph):
    remove_ctr_vars = set()
    for node in graph.all_var_nodes():
        if node.is_ctrl_var():
            remove_ctr_vars.add(node)
    graph.safe_remove_nodes(remove_ctr_vars)
    return graph


def _apply_pass(scope,
                graph,
                pass_name,
                attrs=None,
                attr_values=None,
                debug=False):
    ir_pass = core.get_pass(pass_name)
    cpp_graph = graph.graph
    if not cpp_graph.has('__param_scope__'):
        cpp_graph.set_not_owned('__param_scope__', scope)
    if attrs:
        assert attr_values and len(attrs) == len(
            attr_values
        ), "Different number of pass attributes and their values."
        for attr, value in zip(attrs, attr_values):
            ir_pass.set(attr, value)
    ir_pass.apply(cpp_graph)
    if debug:
        graph.draw('.', 'qat_fp32_{}'.format(pass_name), graph.all_op_nodes())
    _remove_unused_var_nodes(graph)
    return graph


class PostTrainingQuantization(object):
    """
    Utilize the post training quantization method to quantize the FP32 model.
    It uses calibrate data to get the quantization information for all
    quantized variables.
    """

    def __init__(self,
                 executor,
                 model_dir,
                 scope=None,
                 model_filename=None,
                 params_filename=None,
                 batch_generator=None,
                 sample_generator=None,
                 data_loader=None,
                 batch_size=10,
                 batch_nums=None,
                 algo="KL",
                 hist_percent=0.99999,
                 quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                 round_type='round',
                 learning_rate=0.001,
                 is_full_quantize=False,
                 bias_correction=False,
                 activation_bits=8,
                 weight_bits=8,
                 activation_quantize_type='range_abs_max',
                 weight_quantize_type='channel_wise_abs_max',
                 onnx_format=False,
                 freeze_model=True,
                 optimize_model=False,
                 is_use_cache_file=False,
                 skip_tensor_list=None,
                 same_scale_tensor_list=None,
                 cache_dir=None,
                 scale_dict=None,
                 return_graph=False):
        '''
        Constructor.

        Args:
            executor(fluid.Executor): The executor to load, run and save the
                quantized model.
            scope(fluid.Scope, optional): The scope of the program, use it to load
                and save variables. If scope=None, get scope by global_scope().
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
            batch_generator(Python Generator): The batch generator provides
                calibrate data for DataLoader, and it returns a batch every
                time. Note that only one of sample_generator and batch_generator
                should be set. Besides, batch_generator supports lod tensor.
            sample_generator(Python Generator): The sample generator provides
                calibrate data for DataLoader, and it only returns a sample every
                time. Note that only one of sample_generator and batch_generator
                should be set. Besides, sample_generator does not support lod tensor.
            data_loader(Python Generator, Paddle.io.DataLoader, optional): The
                Generator or Dataloader provides calibrate data, and it could
                return a batch every time.
            batch_size(int, optional): The batch size of DataLoader. Default is 10.
            batch_nums(int, optional): If batch_nums is not None, the number of
                calibrate data is batch_size*batch_nums. If batch_nums is None, use
                all data provided by sample_generator as calibrate data.
            algo(str, optional): If algo='KL', use the KL-divergence method to
                get the KL threshold for quantized activations and get the abs_max
                value for quantized weights. If algo='abs_max', get the abs max
                value for activations and weights. If algo='min_max', get the min
                and max value for quantized activations and weights. If algo='avg',
                get the average value among the max values for activations. If
                algo='hist', get the value of 'hist_percent' quantile as the threshold.
                If algo='mse', get the value which makes the quantization mse loss
                minimal. If algo='emd', get the value which makes an EMD-style loss
                minimal. If algo='ptf', search a power-of-two scale for activations.
                Default is 'KL'.
            hist_percent(float, optional): The threshold of algo 'hist' for activations.
                Default is 0.99999.
            quantizable_op_type(list[str], optional): List the type of ops
                that will be quantized. Default is ["conv2d", "depthwise_conv2d",
                "mul"].
            round_type(str, optional): The method of converting the quantized weights
                value float->int. Currently supports ['round', 'adaround'] methods.
                Default is 'round', which is rounding nearest to the integer.
                'adaround' refers to https://arxiv.org/abs/2004.10568.
            learning_rate(float, optional): The learning rate of the adaround method.
            is_full_quantize(bool, optional): If set is_full_quantize as True,
                apply quantization to all supported quantizable op types. If set
                is_full_quantize as False, only apply quantization to the op types
                according to the input quantizable_op_type.
            bias_correction(bool, optional): If set as True, use the bias correction
                method of https://arxiv.org/abs/1810.05723. Default is False.
            activation_bits(int): quantization bit number for activation.
            weight_bits(int, optional): quantization bit number for weights.
            activation_quantize_type(str): quantization type for activation,
                now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
                This param only specifies the fake ops in saving quantized model.
                If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
                obtained by post training quantization in fake ops. Note that, if it
                is 'abs_max', the scale will not be saved in fake ops.
            weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. This param only specifies
                the fake ops in saving quantized model, and we save the scale obtained
                by post training quantization in fake ops. Compared to 'abs_max',
                the model accuracy is usually higher when it is 'channel_wise_abs_max'.
            onnx_format(bool): Whether to export the quantized model with format of ONNX.
                Default is False.
            freeze_model(bool): Whether to convert the quantized and trained ``program``
                to the final quantized ``program``. Default: True.
            skip_tensor_list(list): List of skip quant tensor name. Default: None.
            same_scale_tensor_list(list(list)): The list of tensors that keep the same
                scale in the outermost list; the final scale for every inner list is
                the max of the scales of the tensors in that list. A tensor name may
                also be given as 'name#op#scalar' (op is '*' or '/'), in which case
                its scale is adjusted by the scalar before taking the max, and the
                shared result is inversely adjusted afterwards. Default: None.
            optimize_model(bool, optional): If set optimize_model as True, it applies
                some passes to the model before quantization, and it supports
                `conv2d/depthwise_conv2d + bn` pass so far. Some targets require the
                weights to be quantized by the tensor-wise method, which means the
                weights scale for all channels is the same. However, if we fuse
                `conv2d/depthwise_conv2d + bn`, the weights scale for all channels will
                be different. To address this problem, fuse the pattern before
                quantization. Default False.
            is_use_cache_file(bool, optional): This param is deprecated.
            cache_dir(str, optional): This param is deprecated.
        Returns:
            None

        Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization

            exe = fluid.Executor(fluid.CPUPlace())
            model_dir = "path/to/fp32_model_params"
            # set model_filename as None when the filename is __model__,
            # otherwise set it as the real filename
            model_filename = None
            # set params_filename as None when all parameters were saved in
            # separate files, otherwise set it as the real filename
            params_filename = None
            save_model_path = "path/to/save_model_path"
            # prepare the sample generator according to the model, and the
            # sample generator must return a sample every time. The reference
            # document: https://www.paddlepaddle.org.cn/documentation/docs/zh
            # /user_guides/howto/prepare_data/use_py_reader.html
            sample_generator = your_sample_generator
            batch_size = 10
            batch_nums = 10
            algo = "KL"
            quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"]
            ptq = PostTrainingQuantization(
                        executor=exe,
                        sample_generator=sample_generator,
                        model_dir=model_dir,
                        model_filename=model_filename,
                        params_filename=params_filename,
                        batch_size=batch_size,
                        batch_nums=batch_nums,
                        algo=algo,
                        quantizable_op_type=quantizable_op_type)
            ptq.quantize()
            ptq.save_quantized_model(save_model_path)
        '''

        self._support_activation_quantize_type = [
            'range_abs_max', 'moving_average_abs_max', 'abs_max'
        ]
        self._support_weight_quantize_type = ['abs_max', 'channel_wise_abs_max']
        self._support_algo_type = [
            'KL', 'hist', 'avg', 'mse', 'emd', 'abs_max', 'min_max', 'ptf'
        ]
        assert round_type in ['adaround', 'round']
        self._round_type = round_type
        self._learning_rate = learning_rate
        self._dynamic_quantize_op_type = ['lstm']
        self._support_quantize_op_type = \
            list(set(utils._weight_supported_quantizable_op_type +
                utils._act_supported_quantizable_op_type +
                self._dynamic_quantize_op_type))

        # Check inputs
        assert executor is not None, "The executor cannot be None."
        assert any(gen is not None for gen in [sample_generator,
            batch_generator, data_loader]), "The sample_generator, batch_generator " \
            "and data_loader cannot be None at the same time."
        if data_loader is not None:
            assert isinstance(data_loader, (io.DataLoader, type(isgeneratorfunction), reader.GeneratorLoader)), \
                "data_loader only accepts `paddle.io.DataLoader` or Generator instance."
        assert batch_size > 0, "The batch_size should be greater than 0."
        assert algo in self._support_algo_type, \
            "The algo should be KL, hist, mse, avg, emd, abs_max, min_max or ptf."
        assert activation_quantize_type in self._support_activation_quantize_type, \
            "The activation_quantize_type ({}) should be in ({}).".format(
            activation_quantize_type, self._support_activation_quantize_type)
        assert weight_quantize_type in self._support_weight_quantize_type, \
            "The weight_quantize_type ({}) should be in ({}).".format(
            weight_quantize_type, self._support_weight_quantize_type)

        # Save input params
        self._bias_correction = bias_correction
        self._executor = executor
        self._scope = global_scope() if scope is None else scope
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename
        self._sample_generator = sample_generator
        self._batch_generator = batch_generator
        self._batch_size = batch_size
        self._batch_nums = batch_nums
        self._algo = algo
        self._hist_percent = hist_percent
        self._activation_bits = activation_bits
        self._weight_bits = weight_bits
        self._activation_quantize_type = activation_quantize_type
        self._weight_quantize_type = weight_quantize_type
        self._onnx_format = onnx_format
        self._clip_extra = True if self._onnx_format else False
        self._skip_tensor_list = skip_tensor_list
        self._is_full_quantize = is_full_quantize
        if is_full_quantize:
            self._quantizable_op_type = self._support_quantize_op_type
        else:
            self._quantizable_op_type = quantizable_op_type
            for op_type in self._quantizable_op_type:
                assert op_type in self._support_quantize_op_type, \
                    op_type + " is not supported for quantization."
        self._optimize_model = optimize_model

        # Define variables
        self._place = self._executor.place
        self._program = None
        self._feed_list = None
        self._fetch_list = None
        self._data_loader = data_loader

        self._out_scale_op_list = utils.QUANT_SUPPORTED_OP_TYPE_LIST
        self._quantized_weight_var_name = set()
        self._quantized_act_var_name = set()
        self._weight_op_pairs = {}
        # The vars for algo = KL or hist
        self._sampling_act_abs_min_max = {}
        self._sampling_act_histogram = {}
        self._sampling_data = {}
        self._quantized_var_threshold = {}
        self._histogram_bins = 2048
        # The vars for algo = min_max
        self._quantized_var_min = {}
        self._quantized_var_max = {}
        # The vars for algo = avg
        self._quantized_var_avg = {}
        # The best loss of algo = mse
        self._best_calibration_loss = {}
        # The threshold for algo = abs_max, mse or avg
        self._quantized_threshold = {}
        self._same_scale_tensor_list = same_scale_tensor_list
        self._freeze_model = freeze_model
        self._scale_dict = scale_dict
        self._return_graph = return_graph
        self.FLAG = False
        if self._program is not None:
            self.FLAG = True

    def quantize(self):
        '''
        Load the FP32 model, and use the calibrate data to run the forward
        computation. Based on the sampled data, we can get the quantization
        information, and obtain the final quantized model.

        Args:
            None
        Returns:
            the program of the quantized model.
        '''
        self._load_model_data()
        self._collect_target_varnames()
        self._set_activation_persistable()

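        # For KL and hist, a preparation pass first records each activation's
        # abs min/max so the histogram bins can be allocated; a second pass
        # then accumulates the histograms.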
        if self._algo in ["KL", "hist"]:
            batch_id = 0
            with tqdm(
                    total=self._batch_nums,
                    bar_format=
                    'Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
                    ncols=80) as t:
                for data in self._data_loader():
                    self._executor.run(program=self._program,
                                       feed=data,
                                       fetch_list=self._fetch_list,
                                       return_numpy=False,
                                       scope=self._scope)
                    self._collect_activation_abs_min_max()
                    batch_id += 1
                    t.update()
                    if self._batch_nums and batch_id >= self._batch_nums:
                        break
            self._init_sampling_act_histogram()

        batch_id = 0
        with tqdm(total=self._batch_nums,
                  bar_format=
                  'Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
                  ncols=80) as t:
            for data in self._data_loader():
                self._executor.run(program=self._program,
                                   feed=data,
                                   fetch_list=self._fetch_list,
                                   return_numpy=False,
                                   scope=self._scope)
                self._sampling()
                batch_id += 1
                t.update()
                if self._batch_nums and batch_id >= self._batch_nums:
                    break

        if self._algo == 'avg':
            for var_name in self._quantized_act_var_name:
                self._quantized_threshold[var_name] = \
                np.array(self._quantized_var_avg[var_name]).mean()
        if self._algo in ["KL", "hist"]:
            self._calculate_kl_hist_threshold()

        if self._round_type == 'adaround':
            self._adaround_apply()

        self._reset_activation_persistable()

        if self._algo == 'min_max':
            self._save_input_threshold()
        else:
            self._update_program()

        # save out_threshold for quantized ops.
        if not self.FLAG:
            self._save_output_threshold()

        if any(op_type in self._quantizable_op_type
               for op_type in self._dynamic_quantize_op_type):
            self._collect_dynamic_quantize_op_threshold(
                self._dynamic_quantize_op_type)

        utils.move_persistable_var_to_global_block(self._program)

        if not self._return_graph:
            return self._program
        else:
            main_graph = IrGraph(core.Graph(self._program.desc), for_test=True)
            return main_graph

    def _adaround_apply(self):
        assert self._algo != "min_max", "The algo should not be min_max."
        if self._algo in ["KL", "hist"]:
            scale_dict = self._quantized_var_threshold
        else:
            scale_dict = self._quantized_threshold
        run_adaround(self._data_loader,
                     self._program,
                     self._fetch_list,
                     self._executor,
                     self._scope,
                     self._place,
                     self._quantized_op_pairs,
                     self._weight_op_pairs,
                     scale_dict,
                     num_iterations=self._batch_nums,
                     bias_correction=self._bias_correction,
                     lr=self._learning_rate)

    def save_quantized_model(self,
                             save_model_path,
                             model_filename=None,
                             params_filename=None):
        '''
        Save the quantized model to the disk.

        Args:
            save_model_path(str): The path to save the quantized model.
            model_filename(str, optional): If the model_filename is None,
                save the model to '__model__'. Otherwise, save the model
                to the specified filename. Default: None.
            params_filename(str, optional): If the params_filename is None,
                save params to separate files. Otherwise, save all params
                to the specified filename.
        Returns:
            None
        '''
        io.save_inference_model(dirname=save_model_path,
                                model_filename=model_filename,
                                params_filename=params_filename,
                                feeded_var_names=self._feed_list,
                                target_vars=self._fetch_list,
                                executor=self._executor,
                                main_program=self._program,
                                clip_extra=self._clip_extra)
        _logger.info("The quantized model is saved in " + save_model_path)

    def _load_model_data(self):
        '''
        Load model and set data loader.
        '''
        if self._program is None:
            _logger.info("Load model and set data loader ...")
            [self._program, self._feed_list, self._fetch_list] = \
                io.load_inference_model(dirname=self._model_dir,
                                        executor=self._executor,
                                        model_filename=self._model_filename,
                                        params_filename=self._params_filename)

        if self._optimize_model:
            self._optimize_fp32_model()

        feed_vars = [framework._get_var(str(var_name), self._program) \
            for var_name in self._feed_list]

        if self._data_loader is not None:
            self._batch_nums = self._batch_nums if self._batch_nums else len(
                self._data_loader)
            return
        self._data_loader = io.DataLoader.from_generator(feed_list=feed_vars,
                                                         capacity=3 *
                                                         self._batch_size,
                                                         iterable=True)
        if self._sample_generator is not None:
            self._data_loader.set_sample_generator(self._sample_generator,
                                                   batch_size=self._batch_size,
                                                   drop_last=True,
                                                   places=self._place)
        elif self._batch_generator is not None:
            self._data_loader.set_batch_generator(self._batch_generator,
                                                  places=self._place)
        self._batch_nums = self._batch_nums if self._batch_nums else len(
            list(self._data_loader))

    def _optimize_fp32_model(self):
        '''
        Fuse the `conv2d/depthwise_conv2d + bn` in FP32 model.
        '''
        _logger.info("Optimize FP32 model ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)
        graph = _remove_ctrl_vars(graph)
        graph = _apply_pass(self._scope, graph, 'conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_eltwiseadd_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph,
                            'depthwise_conv_eltwiseadd_bn_fuse_pass')

        self._program = graph.to_program()

    def _collect_target_varnames(self):
        '''
        Collect the variable names for sampling, and set activation
        variables to be persistable.
        '''
        # TODO(juncaipeng), consider the name_scope of skip_quant
        _logger.info("Collect quantized variable names ...")
        self._quantized_op_pairs = {}

        def collect_var_name(var_name_list, persistable_var_names, op_type):
            for var_name in var_name_list:
                if var_name in persistable_var_names:
                    self._quantized_weight_var_name.add(var_name)
                    self._weight_op_pairs[var_name] = op_type
                else:
                    self._quantized_act_var_name.add(var_name)

        persistable_var_names = _all_persistable_var_names(self._program)
        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                # skip quant for tensors in self._skip_tensor_list
                if self._skip_tensor_list is not None:
                    for inp_name in utils._get_op_input_var_names(op):
                        if inp_name in self._skip_tensor_list:
                            op._set_attr("op_namescope", "skip_quant")

                op_type = op.type
                if self._is_full_quantize and \
                    op_type not in self._quantizable_op_type:
                    _logger.warning(op_type +
                                    " is not supported for quantization.")
                # For quantized ops, sample inputs and outputs
                if op_type in self._quantizable_op_type:
                    collect_var_name(utils._get_op_input_var_names(op),
                                     persistable_var_names, op_type)
                    collect_var_name(utils._get_op_output_var_names(op),
                                     persistable_var_names, op_type)
                    # collect the output var names of quantized ops
                    for out_var_name in utils._get_op_output_var_names(op):
                        for in_var_name in utils._get_op_input_var_names(op):
                            if in_var_name in persistable_var_names:
                                self._quantized_op_pairs[
                                    in_var_name] = out_var_name
                # For other ops, only sample the output scale
                elif op_type in self._out_scale_op_list:
                    collect_var_name(utils._get_op_output_var_names(op),
                                     persistable_var_names, op_type)

    def _set_activation_persistable(self):
        '''
        Set activation variables to be persistable, so we can obtain
        the tensor data in the sampling stage.
        '''
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = True

    def _reset_activation_persistable(self):
        '''
        Reset activations to be not persistable.
        '''
        to_erase = []
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = False
                to_erase.append(var.name)

    def _sampling(self):
        '''
        Sample the min/max, abs_max or histogram in every iteration.
        '''
        if self._algo == "abs_max":
            self._sample_abs_max()
        elif self._algo == "avg":
            self._sample_avg()
        elif self._algo == "min_max":
            self._sample_min_max()
        elif self._algo == "mse":
            self._sample_mse()
        elif self._algo == "emd":
            self._sample_emd()
        elif self._algo == "ptf":
            self._sample_ptf()
        elif self._algo in ["KL", "hist"]:
            self._sample_histogram()

    def _sample_mse(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value
        _logger.info("MSE searching stage ...")
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = var_tensor.flatten()
            abs_max_value = float(np.max(np.abs(var_tensor)))
            abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
            s = 0.3
            if var_name not in self._best_calibration_loss:
                self._best_calibration_loss[var_name] = float('inf')
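            # Sweep candidate scales s * abs_max for s in [0.3, 1.0] with step
            # 0.02 and keep the scale whose quantize-dequantize reconstruction
            # has the smallest MSE against the original tensor.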
            while s <= 1.0:
                scale = s * abs_max_value
                s += 0.02
                bins = 2**(self._activation_bits - 1) - 1
                if self._onnx_format:
                    quant_var = np.clip(np.round(var_tensor / scale * bins),
                                        -bins - 1, bins)
                    quant_dequant_var = quant_var / bins * scale
                else:
                    quant_dequant_var = np.round(
                        np.clip(var_tensor, 0.0, scale) / scale *
                        bins) / bins * scale
                mse_loss = ((var_tensor - quant_dequant_var)**2).mean()
                if mse_loss <= self._best_calibration_loss[var_name]:
                    self._best_calibration_loss[var_name] = mse_loss
                    self._quantized_threshold[var_name] = scale

    def _sample_emd(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value
        _logger.info("EMD searching stage ...")
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = var_tensor.flatten()
            abs_max_value = float(np.max(np.abs(var_tensor)))
            abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
            s = 0.3
            if var_name not in self._best_calibration_loss:
                self._best_calibration_loss[var_name] = float('inf')
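            # Same scale sweep as the mse algo, but candidates are scored with
            # an EMD-style loss: |mean shift| + |std shift| after quant-dequant.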
            while s <= 1.0:
                scale = s * abs_max_value
                s += 0.02
                bins = 2**(self._activation_bits - 1) - 1
                if self._onnx_format:
                    quant_var = np.clip(np.round(var_tensor / scale * bins),
                                        -bins - 1, bins)
                    quant_dequant_var = quant_var / bins * scale
                else:
                    quant_dequant_var = np.round(
                        np.clip(var_tensor, 0.0, scale) / scale *
                        bins) / bins * scale
                emd_loss = np.abs(
                    np.mean(var_tensor) - np.mean(quant_dequant_var)) + np.abs(
                        np.std(var_tensor) - np.std(quant_dequant_var))
                if emd_loss <= self._best_calibration_loss[var_name]:
                    self._best_calibration_loss[var_name] = emd_loss
                    self._quantized_threshold[var_name] = scale

    def _sample_avg(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if var_name not in self._quantized_var_avg:
                self._quantized_var_avg[var_name] = []
            abs_avg_value = float(np.mean(np.max(
                np.abs(var_tensor.reshape(var_tensor.shape[0], -1)), axis=1)))
            self._quantized_var_avg[var_name].append(abs_avg_value)

    def _sample_abs_max(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
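            # Keep a running max of |activation| across all calibration batches.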
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if (var_name not in self._quantized_threshold) or \
                (abs_max_value > self._quantized_threshold[var_name]):
                self._quantized_threshold[var_name] = abs_max_value

    def _sample_min_max(self):
        if self._quantized_var_min == {} and self._quantized_var_max == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    min_value = float(np.min(var_tensor))
                    max_value = float(np.max(var_tensor))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    min_value = []
                    max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            min_value.append(float(np.min(var_tensor[:, i])))
                            max_value.append(float(np.max(var_tensor[:, i])))
                    else:
                        for i in range(var_tensor.shape[0]):
                            min_value.append(float(np.min(var_tensor[i])))
                            max_value.append(float(np.max(var_tensor[i])))
                self._quantized_var_min[var_name] = min_value
                self._quantized_var_max[var_name] = max_value

        for var_name in self._quantized_act_var_name:
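            # Track the running min and max of each activation across batches.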
            var_tensor = utils.load_variable_data(self._scope, var_name)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if (var_name not in self._quantized_var_min) or \
                (min_value < self._quantized_var_min[var_name]):
                self._quantized_var_min[var_name] = min_value
            if (var_name not in self._quantized_var_max) or \
                (max_value > self._quantized_var_max[var_name]):
                self._quantized_var_max[var_name] = max_value

    def _sample_histogram(self):
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor_abs = np.abs(var_tensor)
            bins = self._sampling_act_histogram[var_name][1]
            hist, _ = np.histogram(var_tensor_abs, bins=bins)
            self._sampling_act_histogram[var_name][0] += hist

    def _sample_ptf(self):
        """
        The following code is modified from:
        https://github.com/megvii-research/FQ-ViT/
        """
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in utils._channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            q_max = 2**(self._activation_bits - 1) - 1
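            # Try power-of-two fractions of abs_max as candidate scales and
            # keep the one with the smallest L2 reconstruction loss.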
            scale8 = abs_max_value / q_max
            scale4 = scale8 / 2
            scale2 = scale4 / 2
            scale1 = scale2 / 2
            quant_dequant_var_scale1 = np.clip(np.round(var_tensor / scale1), 0,
                                               q_max) * scale1
            quant_dequant_var_scale2 = np.clip(np.round(var_tensor / scale2), 0,
                                               q_max) * scale2
            quant_dequant_var_scale4 = np.clip(np.round(var_tensor / scale4), 0,
                                               q_max) * scale4
            quant_dequant_var_scale8 = np.clip(np.round(var_tensor / scale8), 0,
                                               q_max) * scale8
            score1 = utils.l2_loss(var_tensor, quant_dequant_var_scale1)
            score2 = utils.l2_loss(var_tensor, quant_dequant_var_scale2)
            score4 = utils.l2_loss(var_tensor, quant_dequant_var_scale4)
            score8 = utils.l2_loss(var_tensor, quant_dequant_var_scale8)
            score = [score1, score2, score4, score8]
            mask = 2**score.index(min(score))
            scale = scale1 * mask
            threshold = q_max * scale
            self._quantized_threshold[var_name] = threshold

    def _save_input_threshold(self):
        '''
        Save input threshold to the quantized op.
        '''
        assert self._algo == "min_max", \
            "The algo should be min_max to save input threshold."
        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                if op.type in self._quantizable_op_type:
                    for var_name in utils._get_op_input_var_names(op):
                        assert var_name in self._quantized_var_min
                        assert var_name in self._quantized_var_max
                        op._set_attr(var_name + ".min",
                                     self._quantized_var_min[var_name])
                        op._set_attr(var_name + ".max",
                                     self._quantized_var_max[var_name])
                        op._set_attr("with_quant_attr", True)

    def _collect_activation_abs_min_max(self):
        '''
        Collect the abs_min and abs_max for all activations. When algo = KL,
        get the min and max value, and then calculate the threshold.
        '''
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = np.abs(var_tensor)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if var_name not in self._sampling_act_abs_min_max:
                self._sampling_act_abs_min_max[var_name] = [
                    min_value, max_value
                ]
            else:
                if min_value < self._sampling_act_abs_min_max[var_name][0]:
                    self._sampling_act_abs_min_max[var_name][0] = min_value
                if max_value > self._sampling_act_abs_min_max[var_name][1]:
                    self._sampling_act_abs_min_max[var_name][1] = max_value

    def _init_sampling_act_histogram(self):
        '''
        Based on the min/max value, init the sampling_act_histogram.
        '''
        for var_name in self._quantized_act_var_name:
            if var_name not in self._sampling_act_histogram:
                min_val = self._sampling_act_abs_min_max[var_name][0]
                max_val = self._sampling_act_abs_min_max[var_name][1]
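                # A histogram over empty data fixes the bin edges, so every
                # later batch accumulates counts into the same bins.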
                hist, hist_edges = np.histogram([],
                                                bins=self._histogram_bins,
                                                range=(min_val, max_val))
                self._sampling_act_histogram[var_name] = [hist, hist_edges]

    def _calculate_kl_hist_threshold(self):
        '''
        Calculate the KL or hist threshold of quantized variables.
        '''
        _logger.info("Calculate {} threshold ...".format(self._algo))
        assert self._algo in ["KL", "hist"], "The algo should be KL or hist."

        # Abs_max threshold for weights
        for var_name in self._quantized_weight_var_name:
            weight_data = utils.load_variable_data(self._scope, var_name)
            if self._weight_quantize_type == "abs_max":
                weight_threshold = float(np.max(np.abs(weight_data)))
            elif self._weight_quantize_type == "channel_wise_abs_max":
                weight_threshold = []
                if self._weight_op_pairs[
                        var_name] in utils._channelwise_quant_axis1_ops:
                    for i in range(weight_data.shape[1]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[:, i]))))
                else:
                    for i in range(weight_data.shape[0]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[i]))))
            self._quantized_var_threshold[var_name] = weight_threshold

        for var_name in self._quantized_act_var_name:
            hist, hist_edges = self._sampling_act_histogram[var_name]
            if self._algo == "KL":
                bin_width = hist_edges[1] - hist_edges[0]
                self._quantized_var_threshold[var_name] = \
                    cal_kl_threshold(hist, bin_width, self._activation_bits)
            elif self._algo == "hist":
                self._quantized_var_threshold[var_name] = \
                    self._get_hist_scaling_factor(hist, hist_edges)

    def _update_program(self):
        '''
        Use QuantizationTransformPass and AddQuantDequantPass to insert
        fake_quantize, fake_dequantize and fake_quant_dequant ops.
        Besides, save all thresholds to the scale var node.
        '''
        _logger.info("Update the program ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)

        # use QuantizationTransformPass to insert fake_quant/fake_dequantize op
        major_quantizable_op_types = []
        for op_type in utils._weight_supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                major_quantizable_op_types.append(op_type)
        if not self._onnx_format:
            transform_pass = QuantizationTransformPass(
                scope=self._scope,
                place=self._place,
                weight_bits=self._weight_bits,
                activation_bits=self._activation_bits,
                activation_quantize_type=self._activation_quantize_type,
                weight_quantize_type=self._weight_quantize_type,
                quantizable_op_type=major_quantizable_op_types)
        else:
            transform_pass = QuantizationTransformPassV2(
                scope=self._scope,
                place=self._place,
                weight_bits=self._weight_bits,
                activation_bits=self._activation_bits,
                activation_quantize_type=self._activation_quantize_type,
                weight_quantize_type=self._weight_quantize_type,
                quantizable_op_type=major_quantizable_op_types)

        for sub_graph in graph.all_sub_graphs():
            # Inserting fake_quant/fake_dequantize ops must happen on a test
            # graph, so set _for_test to True for every sub graph.
            sub_graph._for_test = True
            transform_pass.apply(sub_graph)

        # use AddQuantDequantPass to insert fake_quant_dequant op
        minor_quantizable_op_types = []
        for op_type in utils._act_supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                minor_quantizable_op_types.append(op_type)
        if not self._onnx_format:
            add_quant_dequant_pass = AddQuantDequantPass(
                scope=self._scope,
                place=self._place,
                quantizable_op_type=minor_quantizable_op_types)
        else:
            add_quant_dequant_pass = AddQuantDequantPassV2(
                scope=self._scope,
                place=self._place,
                quantizable_op_type=minor_quantizable_op_types,
                is_full_quantized=True)

        for sub_graph in graph.all_sub_graphs():
            sub_graph._for_test = True
            add_quant_dequant_pass.apply(sub_graph)

        # save threshold to scale var node
        if self._scale_dict is None:
            if self._algo in ["KL", "hist"]:
                scale_dict = self._quantized_var_threshold
            else:
                scale_dict = self._quantized_threshold

            if self._same_scale_tensor_list is not None:
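                # Each inner list shares one scale: take the max across the
                # group; an entry 'name#op#scalar' has its scale adjusted by
                # the scalar before the max and inversely adjusted afterwards.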
                for tensor_list in self._same_scale_tensor_list:
                    max_scale = None
                    tmp_tensor_list = []
                    for tensor_name in tensor_list:
                        if '#' in tensor_name:
                            real_tensor_name, opera, scalar = tensor_name.split(
                                '#')
                            if real_tensor_name not in scale_dict.keys():
                                continue
                            if opera == '*':
                                scale_dict[real_tensor_name] = float(
                                    scale_dict[real_tensor_name]) * float(
                                        scalar)
                            elif opera == '/':
                                scale_dict[real_tensor_name] = float(
                                    scale_dict[real_tensor_name]) / float(
                                        scalar)
                            max_scale = scale_dict[
                                real_tensor_name] if max_scale is None else max(
                                    max_scale, scale_dict[real_tensor_name])
                        else:
                            if tensor_name not in scale_dict.keys():
                                continue
                            max_scale = scale_dict[
                                tensor_name] if max_scale is None else max(
                                    max_scale, scale_dict[tensor_name])

                    for tensor_name in tensor_list:
                        if '#' in tensor_name:
                            real_tensor_name, opera, scalar = tensor_name.split(
                                '#')
                            if real_tensor_name not in scale_dict.keys():
                                continue
                            if opera == '*':
                                scale_dict[
                                    real_tensor_name] = max_scale / float(
                                        scalar)
                            elif opera == '/':
                                scale_dict[
                                    real_tensor_name] = max_scale * float(
                                        scalar)
                        else:
                            if tensor_name not in scale_dict.keys():
                                continue
                            scale_dict[tensor_name] = max_scale
            self._scale_dict = scale_dict

        for key, val in self._scale_dict.items():
            utils.set_variable_data(self._scope, self._place, key + "@scale",
                                    np.array([val], dtype=np.float32))
            utils.set_variable_data(self._scope, self._place,
                                    key + ".quant_dequant@scale",
                                    np.array([val], dtype=np.float32))

        if not self._onnx_format:
            # apply QuantizationFreezePass, and obtain the final quant model
            if self._freeze_model:
                freeze_pass = QuantizationFreezePass(
                    scope=self._scope,
                    place=self._place,
                    bias_correction=self._bias_correction,
                    weight_bits=self._weight_bits,
                    round_type=self._round_type,
                    activation_bits=self._activation_bits,
                    weight_quantize_type=self._weight_quantize_type,
                    quantizable_op_type=major_quantizable_op_types)

                for sub_graph in graph.all_sub_graphs():
                    sub_graph._for_test = True
                    freeze_pass.apply(sub_graph)
        else:
            quant_weight_pass = QuantWeightPass(self._scope, self._place)
            for sub_graph in graph.all_sub_graphs():
                sub_graph._for_test = True
                quant_weight_pass.apply(sub_graph)

        self._program = graph.to_program()

    def _save_output_threshold(self):
        '''
        Save output threshold to the quantized op.
        '''
        self._calibration_scales = {}

        def save_info(op_node, out_var_name, threshold_map, out_info_name,
                      quantized_type):
            assert out_var_name in threshold_map, \
                "The output ({}) of {} node does not have threshold.".format(
                out_var_name, op_node.type)
            if self._onnx_format:
                # For easy extension, each var_node sets up a dict to save
                # its quant parameters.
                self._calibration_scales[out_var_name] = {}
                self._calibration_scales[out_var_name]['scale'] = \
                    threshold_map[out_var_name]
            else:
                op_node._set_attr(out_info_name, threshold_map[out_var_name])
                op_node._set_attr("with_quant_attr", True)
                if op_node.type in self._quantizable_op_type:
                    op_node._set_attr("quantization_type", quantized_type)

        def analysis_and_save_info(op_node, out_var_name):
            argname_index = utils._get_output_name_index(op_node, out_var_name)
            assert argname_index is not None, \
                out_var_name + " is not the output of the op"
            if self._algo == "KL":
                # For compatibility, we save output threshold by two methods.
                save_info(op_node, out_var_name, self._quantized_var_threshold,
                          "out_threshold", "post_kl")
                save_info(
                    op_node, out_var_name, self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_kl")
            elif self._algo == "hist":
                # For compatibility, we save output threshold by two methods.
                save_info(op_node, out_var_name, self._quantized_var_threshold,
                          "out_threshold", "post_hist")
                save_info(
                    op_node, out_var_name, self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_hist")

            elif self._algo in ["avg", "abs_max", "mse", "emd", "ptf"]:
                save_info(op_node, out_var_name, self._quantized_threshold,
                          "out_threshold", "post_" + str(self._algo))
                save_info(
                    op_node, out_var_name, self._quantized_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_" + str(self._algo))
            elif self._algo == "min_max":
                save_info(op_node, out_var_name, self._quantized_var_min,
                          "out_min", "post_min_max")
                save_info(op_node, out_var_name, self._quantized_var_max,
                          "out_max", "post_min_max")

        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                if op.type in (self._quantizable_op_type +
                               self._out_scale_op_list):
                    out_var_names = utils._get_op_output_var_names(op)
                    for var_name in out_var_names:
                        analysis_and_save_info(op, var_name)

    def _collect_dynamic_quantize_op_threshold(self, target_ops_type):
        """
        Collect and save the weight threshold for dynamic quantize ops,
        such as lstm and gru.
        Args:
            target_ops_type(list): the op type of target ops
        Returns:
            None
        """

        target_ops = []
        for index in range(self._program.num_blocks):
            for op in self._program.block(index).ops:
                if op.type in target_ops_type:
                    target_ops.append(op)

        quantization_type = str("post_" + self._algo).lower()
        persistable_var_names = _all_persistable_var_names(self._program)
        for op in target_ops:
            for var_name in utils._get_op_input_var_names(op):
                if var_name in persistable_var_names:
                    var_data = utils.load_variable_data(self._scope, var_name)
                    threshold = float(np.max(np.abs(var_data)))
                    argname, index = utils._get_input_name_index(op, var_name)
                    op._set_attr(argname + str(index) + "_threshold", threshold)
                    op._set_attr("quantization_type", quantization_type)
                    op._set_attr("bit_length", self._weight_bits)
                    op._set_attr("with_quant_attr", True)

    def _get_hist_scaling_factor(self, hist, hist_edges):
        '''
        Using the hist method to get the scaling factor.
        '''
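        # Worked example (hypothetical numbers): with hist_percent=0.9999,
        # hist=[0.6, 0.3, 0.0999, 0.0001] and hist_edges=[0., 1., 2., 3., 4.],
        # the cumulative sum first reaches 0.9999 at i=2, so hist_index is 3
        # and the returned scaling factor is (3 - 0.5) * 1.0 = 2.5.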
        threshold_rate = self._hist_percent
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return (hist_index - 0.5) * bin_width


class PostTrainingQuantizationProgram(PostTrainingQuantization):
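    """
    Post-training quantization for a Program that is already built in
    memory: the program, feed list and fetch list are passed in directly
    instead of being loaded from a saved inference model.
    """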

    def __init__(self,
                 executor,
                 program,
                 feed_list=None,
                 fetch_list=None,
                 scope=None,
                 batch_generator=None,
                 sample_generator=None,
                 data_loader=None,
                 batch_size=10,
                 batch_nums=None,
                 algo="KL",
                 hist_percent=0.99999,
                 quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                 round_type='round',
                 learning_rate=0.001,
                 is_full_quantize=False,
                 bias_correction=False,
                 activation_bits=8,
                 weight_bits=8,
                 activation_quantize_type='range_abs_max',
                 weight_quantize_type='channel_wise_abs_max',
                 onnx_format=False,
                 freeze_model=True,
                 optimize_model=False,
                 is_use_cache_file=False,
                 skip_tensor_list=None,
                 same_scale_tensor_list=None,
                 cache_dir=None,
                 scale_dict=None,
                 return_graph=True):
        super().__init__(executor, scope, None, None, None, batch_generator,
                         sample_generator, data_loader, batch_size, batch_nums,
                         algo, hist_percent, quantizable_op_type, round_type,
                         learning_rate, is_full_quantize, bias_correction,
                         activation_bits, weight_bits, activation_quantize_type,
                         weight_quantize_type, onnx_format, freeze_model,
                         optimize_model, is_use_cache_file, skip_tensor_list,
                         same_scale_tensor_list, cache_dir, scale_dict,
                         return_graph)
        self.FLAG = False
        self._program = program
        if self._program is not None:
            self.FLAG = True
        assert feed_list is not None, \
            "Feed list should not be None."
        assert fetch_list is not None, \
            "Fetch list should not be None."
        self._feed_list = feed_list
        self._fetch_list = fetch_list


class WeightQuantization(object):
    _supported_quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul']
    _supported_weight_quantize_type = ['channel_wise_abs_max', 'abs_max']

    def __init__(self, model_dir, model_filename=None, params_filename=None):
        '''
        This class quantizes the weights of some ops to reduce the model size
        or improve the inference performance.

        Args:
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
        '''
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename

    def quantize_weight_to_int(self,
                               save_model_dir,
                               save_model_filename=None,
                               save_params_filename=None,
                               quantizable_op_type=["conv2d", "mul"],
                               weight_bits=8,
                               weight_quantize_type="channel_wise_abs_max",
                               generate_test_model=False,
                               threshold_rate=0.0):
        '''
        In order to reduce the size of the model, this api quantizes the
        weights of some ops from float32 to int8/16. In the inference stage,
        the quantized weights will be dequantized back to float32.

        Args:
            save_model_dir(str): The path to save the quantized model.
            save_model_filename(str, optional): The name of file to
                save the inference program. If it is None, the default
                filename '__model__' will be used. Default is 'None'.
            save_params_filename(str, optional): The name of file to
                save all parameters. If it is None, parameters are
                saved in separate files. If it is not None, all
                parameters are saved in a single binary file.
            quantizable_op_type(list[str], optional): The list of ops
                that will be quantized, and the quantized ops should be
                contained in ["conv2d", "depthwise_conv2d", "mul"].
                Default is ["conv2d", "mul"].
            weight_bits(int, optional): The bits for the quantized weight,
                and it should be 8 or 16. Default is 8.
            weight_quantize_type(str, optional): quantization type for weights,
                supporting 'channel_wise_abs_max' and 'abs_max'. With
                'channel_wise_abs_max', the accuracy is usually better.
            generate_test_model(bool, optional): If generate_test_model is
                True, it also saves a fake quantized model, in which the
                weights are quantized and then dequantized. We can use
                PaddlePaddle to load the fake quantized model and test its
                accuracy on GPU or CPU.
            threshold_rate(float, optional): This api uses the abs_max method
                to quantize the weight from float32 to int8/16, so the abs
                max value matters for the quantization error. When the abs
                max value is far away from the center of the numerical
                distribution, set threshold_rate between 1e-6 and 1e-8 to
                clip outliers and optimize the abs max value. Default is 0.0.
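
        Examples:
            A minimal usage sketch; the model paths below are illustrative:

                wq = WeightQuantization(model_dir='./fp32_model')
                wq.quantize_weight_to_int(
                    save_model_dir='./quant_model',
                    quantizable_op_type=['conv2d', 'mul'],
                    weight_bits=8,
                    weight_quantize_type='channel_wise_abs_max')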
        '''
        for op_type in quantizable_op_type:
            assert op_type in self._supported_quantizable_op_type, \
                "Input error: " + op_type + \
                " is not supported for weight quantization."
        assert weight_bits in [8, 16], \
            "Input error: weight_bits should be 8 or 16."
        assert weight_quantize_type in self._supported_weight_quantize_type, \
            "Input error: weight_quantize_type should in {}".format(
                self._supported_weight_quantize_type)

        quantized_model_dir = os.path.join(save_model_dir, "quantized_model")
        self._quantize_weight_to_int(quantized_model_dir, save_model_filename,
                                     save_params_filename, quantizable_op_type,
                                     weight_bits, weight_quantize_type, False,
                                     threshold_rate)

        if generate_test_model:
            test_model_dir = os.path.join(save_model_dir, "test_model")
            self._quantize_weight_to_int(test_model_dir, save_model_filename,
                                         save_params_filename,
                                         quantizable_op_type, weight_bits,
                                         weight_quantize_type, True,
                                         threshold_rate)

    def convert_weight_to_fp16(self, save_model_dir):
        """
        Convert all persistable vars from fp32 to fp16.
        Note that this api only changes the data type of variables in the
        __params__ file, and the __model__ file remains unchanged.

        Args:
            save_model_dir(str): The path to save the fp16 model.
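
        Examples:
            A minimal sketch; the paths below are illustrative:

                wq = WeightQuantization(model_dir='./fp32_model')
                wq.convert_weight_to_fp16(save_model_dir='./fp16_model')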
        """

        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [infer_program, feed_list, fetch_list] = \
            io.load_inference_model(dirname=self._model_dir,
                                    executor=exe,
                                    model_filename=self._model_filename,
                                    params_filename=self._params_filename)

        # Clone and save fp16 weights
        save_program = framework.Program()
        save_block = save_program.global_block()
        save_var_map = {}

        for var in infer_program.list_vars():
            if (var.type == core.VarDesc.VarType.RAW) or \
                (not var.persistable) or (var.name in ['feed', 'fetch']) \
                or (var.dtype != core.VarDesc.VarType.FP32):
                continue

            #new_var = _clone_var_to_block_(var, save_block)
            new_var = save_block._clone_variable(var)
            if self._params_filename is not None:
                save_var_map[new_var.name] = new_var
            else:
                save_file_path = os.path.join(os.path.normpath(save_model_dir),
                                              new_var.name)
                save_block.append_op(type='save',
                                     inputs={'X': [new_var]},
                                     outputs={},
                                     attrs={
                                         'file_path':
                                         os.path.normpath(save_file_path),
                                         'save_as_fp16':
                                         True
                                     })

        if self._params_filename is not None:
            save_var_list = []
            for name in sorted(save_var_map.keys()):
                save_var_list.append(save_var_map[name])

            saved_params_var = save_block.create_var(
                type=core.VarDesc.VarType.RAW,
                name=unique_name.generate("saved_params"))
            saved_params_var.desc.set_persistable(True)

            save_path = os.path.join(os.path.normpath(save_model_dir),
                                     self._params_filename)
            save_block.append_op(type='save_combine',
                                 inputs={'X': save_var_list},
                                 outputs={'Y': saved_params_var},
                                 attrs={
                                     'file_path': save_path,
                                     'save_as_fp16': True
                                 })

        save_program._sync_with_cpp()
        exe.run(save_program)

        # Copy model
        model_filename = "__model__" if self._model_filename is None \
                    else self._model_filename
        src_model = os.path.join(self._model_dir, model_filename)
        dest_model = os.path.join(save_model_dir, model_filename)
        shutil.copyfile(src_model, dest_model)

    def _quantize_weight_to_int(self, save_model_dir, save_model_filename,
                                save_params_filename, quantizable_op_type,
                                weight_bits, weight_quantize_type, for_test,
                                threshold_rate):
        """
        Generate quantized model or fake quantized model.
        """
        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [program, feed_list, fetch_list] = \
            io.load_inference_model(dirname=self._model_dir,
                                    executor=exe,
                                    model_filename=self._model_filename,
                                    params_filename=self._params_filename)

        quantized_ops = []
        for index in range(program.num_blocks):
            block = program.block(index)
            for op in block.ops:
                if op.type in quantizable_op_type:
                    quantized_ops.append(op)

        # Quantize weights
        persistable_var_names = _all_persistable_var_names(program)
        for op in quantized_ops:
            for var_name in op.input_arg_names:
                if var_name in persistable_var_names:
                    if weight_quantize_type == "abs_max":
                        self._weight_abs_max_quantization(
                            scope, place, weight_bits, threshold_rate, op,
                            var_name, for_test)
                    elif weight_quantize_type == "channel_wise_abs_max":
                        self._weight_channel_wise_abs_max_quantization(
                            scope, place, weight_bits, op, var_name, for_test)

        io.save_inference_model(dirname=save_model_dir,
                                feeded_var_names=feed_list,
                                target_vars=fetch_list,
                                executor=exe,
                                main_program=program,
                                model_filename=save_model_filename,
                                params_filename=save_params_filename)

    def _weight_abs_max_quantization(self, scope, place, weight_bits,
                                     threshold_rate, op, var_name, for_test):
        '''
        Use abs_max method to quantize weight.
        '''
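        # Worked example (hypothetical values): for weight_bits=8,
        # quantize_range is (1 << 7) - 1 = 127; a tensor whose max |w| is
        # 2.54 gets scale = 2.54 / 127 = 0.02, so w = 1.0 is stored as
        # round(1.0 / 0.02) = 50 in int8.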
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
        weight_data = utils.load_variable_data(scope, var_name)
        if abs(threshold_rate) < 1e-10:
            threshold_value = np.max(np.abs(weight_data))
        else:
            threshold_value = self._calculate_threshold(\
                weight_data, threshold_rate)
            weight_data[weight_data > threshold_value] = threshold_value
            weight_data[weight_data < -threshold_value] = -threshold_value
        scale = threshold_value / quantize_range
        quantized_weight_data = \
            np.around(weight_data / scale).astype(save_weight_dtype)

        # Set weight data
        if not for_test:
            utils.set_variable_data(scope, place, var_name,
                                    quantized_weight_data)
        else:
            dequantized_weight_data = \
                (quantized_weight_data * scale).astype(np.float32)
            utils.set_variable_data(scope, place, var_name,
                                    dequantized_weight_data)

        # Save info
        op._set_attr('quantization_type', 'post_weight_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", [scale])  # Save as list
        op._set_attr("with_quant_attr", True)

    def _weight_channel_wise_abs_max_quantization(self, scope, place,
                                                  weight_bits, op, var_name,
                                                  for_test):
        '''
        Use channel_wise_abs_max method to quantize weight.
        '''
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
        weight_data = utils.load_variable_data(scope, var_name)
        if op.type == "mul":
            scales, quantized_weight_data = \
                self._mul_channel_wise_quantization(weight_data,
                    quantize_range, save_weight_dtype)
        elif op.type in ["conv2d", "depthwise_conv2d"]:
            scales, quantized_weight_data = \
                self._conv_channel_wise_quantization(weight_data,
                    quantize_range, save_weight_dtype)
        else:
            _logger.error(op.type + " is not supported by weight quantization")
            # Return early: scales/quantized_weight_data are undefined here,
            # so falling through would raise a NameError below.
            return

        # Set weight data
        if not for_test:
            utils.set_variable_data(scope, place, var_name,
                                    quantized_weight_data)
        else:
            if op.type == "mul":
                dequantized_weight_data = \
                    self._mul_channel_wise_dequantization(quantized_weight_data, scales)
            elif op.type in ["conv2d", "depthwise_conv2d"]:
                dequantized_weight_data = \
                    self._conv_channel_wise_dequantization(quantized_weight_data, scales)
            else:
                _logger.error(op.type +
                              " is not supported by weight quantization")
            utils.set_variable_data(scope, place, var_name,
                                    dequantized_weight_data)

        # Save info
        op._set_attr('quantization_type', 'post_weight_channel_wise_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", scales)
        op._set_attr("with_quant_attr", True)

    def _conv_channel_wise_quantization(self, weight_data, quantize_range,
                                        save_weight_dtype):
        '''
        Get channel wise scale for the weights of conv2d and depthwise_conv2d,
        and quantize the weights.
        '''
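        # Conv weights keep the output channel on axis 0, so one scale is
        # computed per output channel.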
        scales = []
        quantized_weight_data = np.zeros_like(weight_data,
                                              dtype=save_weight_dtype)
        channel_num = weight_data.shape[0]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[i] = \
                np.around(weight_data[i] / scale).astype(save_weight_dtype)
        return scales, quantized_weight_data

    def _conv_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For conv2d and depthwise_conv2d, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(quantized_weight_data,
                                                dtype=np.float32)
        for i in range(len(scales)):
            dequantized_weight_data[i] = \
                (quantized_weight_data[i] * scales[i]).astype(np.float32)
        return dequantized_weight_data

    def _mul_channel_wise_quantization(self, weight_data, quantize_range,
                                       save_weight_dtype):
        '''
        Get channel wise scale for the weights of mul,
        and quantize the weights.
        '''
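        # Mul weights keep the output dimension on the last axis, so one
        # scale is computed per output column.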
        scales = []
        quantized_weight_data = np.zeros_like(weight_data,
                                              dtype=save_weight_dtype)
        channel_num = weight_data.shape[-1]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[:, i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[:, i] = \
                np.around(weight_data[:, i] / scale).astype(save_weight_dtype)
        return scales, quantized_weight_data

    def _mul_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For mul, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(quantized_weight_data,
                                                dtype=np.float32)
        for i in range(len(scales)):
            dequantized_weight_data[:, i] = \
                (quantized_weight_data[:, i] * scales[i]).astype(np.float32)
        return dequantized_weight_data

    def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000):
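        '''
        Find the histogram-based threshold value such that at least
        (1 - threshold_rate) of the |input| mass lies below it.
        '''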
        input_abs = np.abs(input)
        hist, hist_edges = np.histogram(input_abs,
                                        bins=histogram_bins,
                                        range=(0, np.max(input_abs)))
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= 1.0 - threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return hist_index * bin_width