#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import math
import shutil
import logging
import numpy as np

try:
    from tqdm import tqdm
except ImportError:
    from .utils import tqdm

from inspect import isgeneratorfunction
from .... import io
from .... import core
from .... import reader
from .... import framework
from .... import unique_name
from ....executor import global_scope, Executor
from ....framework import IrGraph
from ....log_helper import get_logger
from .quantization_pass import (
    QuantizationTransformPass,
    QuantizationTransformPassV2,
    QuantizationFreezePass,
    QuantWeightPass,
    AddQuantDequantPass,
    AddQuantDequantPassV2,
)
from .cal_kl_threshold import cal_kl_threshold
from .adaround import run_adaround
from . import utils

__all__ = [
    'PostTrainingQuantization',
    'WeightQuantization',
    'PostTrainingQuantizationProgram',
]

_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
)


def _all_persistable_var_names(program):
    persistable_var_names = []
    for var in program.list_vars():
        if var.persistable:
            persistable_var_names.append(var.name)
    return persistable_var_names


def _remove_unused_var_nodes(graph):
    all_used_vars = set()
    ops = graph.all_op_nodes()
    for op_node in ops:
        for input_node in op_node.inputs:
            all_used_vars.add(input_node)
        for output_node in op_node.outputs:
            all_used_vars.add(output_node)

    all_used_vars = {n.node for n in all_used_vars}
    all_unused_vars = {
        n
        for n in filter(
            lambda node: node.node not in all_used_vars, graph.all_var_nodes()
        )
    }
    graph.safe_remove_nodes(all_unused_vars)
    return graph


def _remove_ctrl_vars(graph):
    remove_ctr_vars = set()
    for node in graph.all_var_nodes():
        if node.is_ctrl_var():
            remove_ctr_vars.add(node)
    graph.safe_remove_nodes(remove_ctr_vars)
    return graph


def _apply_pass(
    scope, graph, pass_name, attrs=None, attr_values=None, debug=False
):
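    # Run the named C++ IR pass over `graph`; the variable scope is exposed
    # to the pass through the graph's '__param_scope__' attribute, and any
    # (attrs, attr_values) pairs are set on the pass before it is applied.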
    ir_pass = core.get_pass(pass_name)
    cpp_graph = graph.graph
    if not cpp_graph.has('__param_scope__'):
        cpp_graph.set_not_owned('__param_scope__', scope)
    if attrs:
        assert attr_values and len(attrs) == len(
            attr_values
        ), "Different number of pass attributes and their values."
        for attr, value in zip(attrs, attr_values):
            ir_pass.set(attr, value)
    ir_pass.apply(cpp_graph)
    if debug:
        graph.draw('.', 'qat_fp32_{}'.format(pass_name), graph.all_op_nodes())
    _remove_unused_var_nodes(graph)
    return graph


class PostTrainingQuantization(object):
    """
    Utilizing post training quantization method to quantize the FP32 model,
    and it uses calibrate data to get the quantization information for all
    quantized variables.
    """

    def __init__(
        self,
        executor,
        model_dir,
        scope=None,
        model_filename=None,
        params_filename=None,
        batch_generator=None,
        sample_generator=None,
        data_loader=None,
        batch_size=10,
        batch_nums=None,
        algo="KL",
        hist_percent=0.99999,
        quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
        round_type='round',
        learning_rate=0.001,
        is_full_quantize=False,
        bias_correction=False,
        activation_bits=8,
        weight_bits=8,
        activation_quantize_type='range_abs_max',
        weight_quantize_type='channel_wise_abs_max',
        onnx_format=False,
        freeze_model=True,
        optimize_model=False,
        is_use_cache_file=False,
        skip_tensor_list=None,
        same_scale_tensor_list=None,
        cache_dir=None,
        scale_dict=None,
        return_graph=False,
    ):
        '''
        Constructor.

        Args:
            executor(fluid.Executor): The executor to load, run and save the
                quantized model.
            scope(fluid.Scope, optional): The scope of the program, use it to load
                and save variables. If scope=None, get scope by global_scope().
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
            batch_generator(Python Generator): The batch generator provides
                calibrate data for DataLoader, and it returns a batch every
                time. Note that, only one of sample_generator and batch_generator
                should be set. Besides, batch_generator supports lod tensor.
            sample_generator(Python Generator): The sample generator provides
                calibrate data for DataLoader, and it only returns a sample every
                time. Note that, only one of sample_generator and batch_generator
                should be set. Besides, sample_generator does not support lod tensor.
            data_loader(Python Generator, Paddle.io.DataLoader, optional): The
                Generator or Dataloader provides calibrate data, and it could
                return a batch every time.
            batch_size(int, optional): The batch size of DataLoader. Default is 10.
            batch_nums(int, optional): If batch_nums is not None, the number of
                calibrate data is batch_size*batch_nums. If batch_nums is None, use
                all data provided by sample_generator as calibrate data.
            algo(str, optional): If algo='KL', use KL-divergence method to
                get the KL threshold for quantized activations and get the abs_max
                value for quantized weights. If algo='abs_max', get the abs max
                value for activations and weights. If algo='min_max', get the min
                and max value for quantized activations and weights. If algo='avg',
                get the average value among the max values for activations. If
                algo='hist', get the value of 'hist_percent' quantile as the threshold.
                If algo='mse', get the value which makes the quantization mse loss
                minimal. Default is KL.
            hist_percent(float, optional): The threshold of algo 'hist' for activations.
                Default is 0.99999.
            quantizable_op_type(list[str], optional): List the type of ops
                that will be quantized. Default is ["conv2d", "depthwise_conv2d",
                "mul"].
            round_type(str, optional): The method of converting the quantized weights
                value float->int. Currently supports ['round', 'adaround'] methods.
                Default is `round`, which is rounding nearest to the integer.
                'adaround' refers to https://arxiv.org/abs/2004.10568.
            learning_rate(float, optional): The learning rate of adaround method.
            is_full_quantize(bool, optional): If set is_full_quantize as True,
                apply quantization to all supported quantizable op types. If set
                is_full_quantize as False, only apply quantization to the op types
                according to the input quantizable_op_type.
            bias_correction(bool, optional): If set as True, use the bias correction
                method of https://arxiv.org/abs/1810.05723. Default is False.
            activation_bits(int): quantization bit number for activation.
            weight_bits(int, optional): quantization bit number for weights.
            activation_quantize_type(str): quantization type for activation,
                now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
                This param only specifies the fake ops in saving quantized model.
                If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
                obtained by post training quantization in fake ops. Note that, if it
                is 'abs_max', the scale will not be saved in fake ops.
            weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. This param only specifies
                the fake ops in saving quantized model, and we save the scale obtained
                by post training quantization in fake ops. Compared to 'abs_max',
                the model accuracy is usually higher when it is 'channel_wise_abs_max'.
            onnx_format(bool): Whether to export the quantized model with format of ONNX.
                Default is False.
            freeze_model(bool): Whether to convert quantized and trained ``program`` to final
                quantized ``program``. Default: True.
            skip_tensor_list(list): List of tensor names to skip quantization. Default: None.
            same_scale_tensor_list(list(list)): Lists of tensors that should keep the same
                scale; the final scale for each inner list is the max of the scales of the
                tensors in that list. Default: None.
            optimize_model(bool, optional): If set optimize_model as True, it applies
                some passes to the model before quantization, and it supports
                `conv2d/depthwise_conv2d + bn` pass so far. Some targets require the
                weights to be quantized by tensor-wise method, which means the weight
                scales for all channels are the same. However, if the
                `conv2d/depthwise_conv2d + bn` pattern is fused, the weight scales
                for the channels will differ. To address this problem, fuse the
                pattern before quantization. Default False.
            is_use_cache_file(bool, optional): This param is deprecated.
            cache_dir(str, optional): This param is deprecated.
        Returns:
            None

        Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization

            exe = fluid.Executor(fluid.CPUPlace())
            model_dir = "path/to/fp32_model_params"
            # set model_filename as None when the filename is __model__,
            # otherwise set it as the real filename
            model_filename = None
            # set params_filename as None when all parameters were saved in
            # separate files, otherwise set it as the real filename
            params_filename = None
            save_model_path = "path/to/save_model_path"
            # prepare the sample generator according to the model, and the
            # sample generator must return a sample every time. The reference
            # document: https://www.paddlepaddle.org.cn/documentation/docs/zh
            # /user_guides/howto/prepare_data/use_py_reader.html
            sample_generator = your_sample_generator
            batch_size = 10
            batch_nums = 10
            algo = "KL"
            quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"]
            ptq = PostTrainingQuantization(
                        executor=exe,
                        sample_generator=sample_generator,
                        model_dir=model_dir,
                        model_filename=model_filename,
                        params_filename=params_filename,
                        batch_size=batch_size,
                        batch_nums=batch_nums,
                        algo=algo,
                        quantizable_op_type=quantizable_op_type)
            ptq.quantize()
            ptq.save_quantized_model(save_model_path)
        '''

        self._support_activation_quantize_type = [
            'range_abs_max',
            'moving_average_abs_max',
            'abs_max',
        ]
        self._support_weight_quantize_type = ['abs_max', 'channel_wise_abs_max']
        self._support_algo_type = [
            'KL',
            'hist',
            'avg',
            'mse',
            'emd',
            'abs_max',
            'min_max',
            'ptf',
        ]
        assert round_type in ['adaround', 'round']
        self._round_type = round_type
        self._learning_rate = learning_rate
        self._dynamic_quantize_op_type = ['lstm']
        self._support_quantize_op_type = list(
            set(
                utils._weight_supported_quantizable_op_type
                + utils._act_supported_quantizable_op_type
                + self._dynamic_quantize_op_type
            )
        )

        # Check inputs
        assert executor is not None, "The executor cannot be None."
        assert any(
            gen is not None
            for gen in [sample_generator, batch_generator, data_loader]
        ), (
            "The sample_generator, batch_generator "
            "and data_loader cannot be None at the same time."
        )
        if data_loader is not None:
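            # Note: type(isgeneratorfunction) evaluates to the plain Python
            # function type, so any user-defined (generator) function also
            # passes this check.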
            assert isinstance(
                data_loader,
                (
                    io.DataLoader,
                    type(isgeneratorfunction),
                    reader.GeneratorLoader,
                ),
            ), "data_loader only accepts `paddle.io.DataLoader` or Generator instance."
        assert batch_size > 0, "The batch_size should be greater than 0."
        assert (
            algo in self._support_algo_type
        ), "The algo should be KL, hist, mse, avg, emd, abs_max, min_max or ptf."
        assert (
            activation_quantize_type in self._support_activation_quantize_type
        ), "The activation_quantize_type ({}) should be in ({}).".format(
            activation_quantize_type, self._support_activation_quantize_type
        )
        assert (
            weight_quantize_type in self._support_weight_quantize_type
        ), "The weight_quantize_type ({}) should be in ({}).".format(
            weight_quantize_type, self._support_weight_quantize_type
        )

        # Save input params
        self._bias_correction = bias_correction
        self._executor = executor
        self._scope = global_scope() if scope is None else scope
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename
        self._sample_generator = sample_generator
        self._batch_generator = batch_generator
        self._batch_size = batch_size
        self._batch_nums = batch_nums
        self._algo = algo
        self._hist_percent = hist_percent
        self._activation_bits = activation_bits
        self._weight_bits = weight_bits
        self._activation_quantize_type = activation_quantize_type
        self._weight_quantize_type = weight_quantize_type
        self._onnx_format = onnx_format
        self._clip_extra = True if self._onnx_format else False
        self._skip_tensor_list = skip_tensor_list
        self._is_full_quantize = is_full_quantize
        if is_full_quantize:
            self._quantizable_op_type = self._support_quantize_op_type
        else:
            self._quantizable_op_type = quantizable_op_type
            for op_type in self._quantizable_op_type:
                assert op_type in self._support_quantize_op_type, (
                    op_type + " is not supported for quantization."
                )
        self._optimize_model = optimize_model

        # Define variables
        self._place = self._executor.place
        self._program = None
        self._feed_list = None
        self._fetch_list = None
        self._data_loader = data_loader

        self._out_scale_op_list = utils.QUANT_SUPPORTED_OP_TYPE_LIST
        self._quantized_weight_var_name = set()
        self._quantized_act_var_name = set()
        self._weight_op_pairs = {}
        # The vars for algo = KL or hist
        self._sampling_act_abs_min_max = {}
        self._sampling_act_histogram = {}
        self._sampling_data = {}
        self._quantized_var_threshold = {}
        self._histogram_bins = 2048
        # The vars for algo = min_max
        self._quantized_var_min = {}
        self._quantized_var_max = {}
        # The vars for algo = avg
        self._quantized_var_avg = {}
        # The best loss of algo = mse
        self._best_calibration_loss = {}
        # The threshold for algo = abs_max, mse or avg
        self._quantized_threshold = {}
        self._same_scale_tensor_list = same_scale_tensor_list
        self._freeze_model = freeze_model
        self._scale_dict = scale_dict
        self._return_graph = return_graph
        self.FLAG = False
        if self._program is not None:
            self.FLAG = True

    def quantize(self):
        '''
        Load the FP32 model, run the calibration data through the forward pass,
        collect the quantization information from the sampled data, and obtain
        the final quantized model.

        Args:
            None
        Returns:
            the program of the quantized model.
        '''
        self._load_model_data()
        self._collect_target_varnames()
        self._set_activation_persistable()

        if self._algo in ["KL", "hist"]:
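            # For KL/hist, a first "preparation" pass over the calibration
            # data only records each activation's abs min/max, so that the
            # histogram bin ranges can be fixed before the sampling pass.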
            batch_id = 0
            with tqdm(
                total=self._batch_nums,
                bar_format='Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
                ncols=80,
            ) as t:
                for data in self._data_loader():
                    self._executor.run(
                        program=self._program,
                        feed=data,
                        fetch_list=self._fetch_list,
                        return_numpy=False,
                        scope=self._scope,
                    )
                    self._collect_activation_abs_min_max()
                    batch_id += 1
                    t.update()
                    if self._batch_nums and batch_id >= self._batch_nums:
                        break
            self._init_sampling_act_histogram()

        batch_id = 0
        with tqdm(
            total=self._batch_nums,
            bar_format='Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
            ncols=80,
        ) as t:
            for data in self._data_loader():
                self._executor.run(
                    program=self._program,
                    feed=data,
                    fetch_list=self._fetch_list,
                    return_numpy=False,
                    scope=self._scope,
                )
                self._sampling()
                batch_id += 1
                t.update()
                if self._batch_nums and batch_id >= self._batch_nums:
                    break

        if self._algo == 'avg':
            for var_name in self._quantized_act_var_name:
                self._quantized_threshold[var_name] = np.array(
                    self._quantized_var_avg[var_name]
                ).mean()
        if self._algo in ["KL", "hist"]:
            self._calculate_kl_hist_threshold()

        if self._round_type == 'adaround':
            self._adaround_apply()

        self._reset_activation_persistable()

        if self._algo == 'min_max':
            self._save_input_threshold()
        else:
            self._update_program()

        # save out_threshold for quantized ops.
        if not self.FLAG:
            self._save_output_threshold()

        if any(
            op_type in self._quantizable_op_type
            for op_type in self._dynamic_quantize_op_type
        ):
            self._collect_dynamic_quantize_op_threshold(
                self._dynamic_quantize_op_type
            )

        utils.move_persistable_var_to_global_block(self._program)

        if not self._return_graph:
            return self._program
        else:
            main_graph = IrGraph(core.Graph(self._program.desc), for_test=True)
            return main_graph

    def _adaround_apply(self):
        assert self._algo != "min_max", "The algo should not be min_max."
        if self._algo in ["KL", "hist"]:
            scale_dict = self._quantized_var_threshold
        else:
            scale_dict = self._quantized_threshold
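        # AdaRound learns, per weight, whether to round up or down so that
        # the quant-dequant output best matches the FP32 output on the
        # calibration data (https://arxiv.org/abs/2004.10568).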
        run_adaround(
            self._data_loader,
            self._program,
            self._fetch_list,
            self._executor,
            self._scope,
            self._place,
            self._quantized_op_pairs,
            self._weight_op_pairs,
            scale_dict,
            num_iterations=self._batch_nums,
            bias_correction=self._bias_correction,
            lr=self._learning_rate,
        )

    def save_quantized_model(
        self, save_model_path, model_filename=None, params_filename=None
    ):
        '''
        Save the quantized model to the disk.

        Args:
            save_model_path(str): The path to save the quantized model.
            model_filename(str, optional): If the model_filename is None,
                save the model to '__model__'. Otherwise, save the model
                to the specified filename. Default: None.
            params_filename(str, optional): If the params_filename is None,
                save params to separate files. Otherwise, save all params
                to the specified filename.
        Returns:
            None
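
        Examples:
        .. code-block:: python

            # A minimal usage sketch: `ptq` is assumed to be a calibrated
            # PostTrainingQuantization instance (see the class docstring).
            ptq.quantize()
            ptq.save_quantized_model(save_model_path="path/to/save_model_path")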
        '''
        io.save_inference_model(
            dirname=save_model_path,
            model_filename=model_filename,
            params_filename=params_filename,
            feeded_var_names=self._feed_list,
            target_vars=self._fetch_list,
            executor=self._executor,
            main_program=self._program,
            clip_extra=self._clip_extra,
        )
        _logger.info("The quantized model is saved in " + save_model_path)

    def _load_model_data(self):
        '''
        Load model and set data loader.
        '''
        if self._program is None:
            _logger.info("Load model and set data loader ...")
            [
                self._program,
                self._feed_list,
                self._fetch_list,
            ] = io.load_inference_model(
                dirname=self._model_dir,
                executor=self._executor,
                model_filename=self._model_filename,
                params_filename=self._params_filename,
            )

        if self._optimize_model:
            self._optimize_fp32_model()

        feed_vars = [
            framework._get_var(str(var_name), self._program)
            for var_name in self._feed_list
        ]

        if self._data_loader is not None:
            self._batch_nums = (
                self._batch_nums if self._batch_nums else len(self._data_loader)
            )
            return
        self._data_loader = io.DataLoader.from_generator(
            feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True
        )
        if self._sample_generator is not None:
            self._data_loader.set_sample_generator(
                self._sample_generator,
                batch_size=self._batch_size,
                drop_last=True,
                places=self._place,
            )
        elif self._batch_generator is not None:
            self._data_loader.set_batch_generator(
                self._batch_generator, places=self._place
            )
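        # If batch_nums was not given, infer it by iterating the freshly
        # built DataLoader once and counting its batches.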
        self._batch_nums = (
            self._batch_nums
            if self._batch_nums
            else len(list(self._data_loader))
        )

    def _optimize_fp32_model(self):
        '''
        Fuse the `conv2d/depthwise_conv2d + bn` in FP32 model.
        '''
        _logger.info("Optimize FP32 model ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)
        graph = _remove_ctrl_vars(graph)
        graph = _apply_pass(self._scope, graph, 'conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_eltwiseadd_bn_fuse_pass')
        graph = _apply_pass(
            self._scope, graph, 'depthwise_conv_eltwiseadd_bn_fuse_pass'
        )

        self._program = graph.to_program()

    def _collect_target_varnames(self):
        '''
        Collect the variable names for sampling, and set activation
        variables to be persistable.
        '''
        # TODO(juncaipeng), consider the name_scope of skip_quant
        _logger.info("Collect quantized variable names ...")
        self._quantized_op_pairs = {}

        def collect_var_name(var_name_list, persistable_var_names, op_type):
            for var_name in var_name_list:
                if var_name in persistable_var_names:
                    self._quantized_weight_var_name.add(var_name)
                    self._weight_op_pairs[var_name] = op_type
                else:
                    self._quantized_act_var_name.add(var_name)

        persistable_var_names = _all_persistable_var_names(self._program)
        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                # skip quant for tensors in self._skip_tensor_list
                if self._skip_tensor_list is not None:
                    for inp_name in utils._get_op_input_var_names(op):
                        if inp_name in self._skip_tensor_list:
                            op._set_attr("op_namescope", "skip_quant")

                op_type = op.type
                if (
                    self._is_full_quantize
                    and op_type not in self._quantizable_op_type
                ):
                    _logger.warning(
                        op_type + " is not supported for quantization."
                    )
                # For quantized ops, sample inputs and outputs
                if op_type in self._quantizable_op_type:
                    collect_var_name(
                        utils._get_op_input_var_names(op),
                        persistable_var_names,
                        op_type,
                    )
                    collect_var_name(
                        utils._get_op_output_var_names(op),
                        persistable_var_names,
                        op_type,
                    )
                    # collect quantized op output var names
                    for out_var_name in utils._get_op_output_var_names(op):
                        for in_var_name in utils._get_op_input_var_names(op):
                            if in_var_name in persistable_var_names:
                                self._quantized_op_pairs[
                                    in_var_name
                                ] = out_var_name
                # For other op, only sample output scale
                elif op_type in self._out_scale_op_list:
                    collect_var_name(
                        utils._get_op_output_var_names(op),
                        persistable_var_names,
                        op_type,
                    )

    def _set_activation_persistable(self):
        '''
        Set activation variables to be persistable, so that we can obtain
        the tensor data when sampling.
        '''
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = True

    def _reset_activation_persistable(self):
        '''
        Reset activations to be not persistable.
        '''
        to_erase = []
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = False
                to_erase.append(var.name)
        # Release the sampled activation tensors held in the scope.
        self._scope.erase(to_erase)

    def _sampling(self):
        '''
        Sample the min/max, abs_max or histogram in every iteration.
        '''
        if self._algo == "abs_max":
            self._sample_abs_max()
        elif self._algo == "avg":
            self._sample_avg()
        elif self._algo == "min_max":
            self._sample_min_max()
        elif self._algo == "mse":
            self._sample_mse()
        elif self._algo == "emd":
            self._sample_emd()
        elif self._algo == "ptf":
            self._sample_ptf()
        elif self._algo in ["KL", "hist"]:
            self._sample_histogram()

    def _sample_mse(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i])))
                            )
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i])))
                            )
                self._quantized_threshold[var_name] = abs_max_value
        _logger.info("MSE searching stage ...")
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = var_tensor.flatten()
            abs_max_value = float(np.max(np.abs(var_tensor)))
            abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
            s = 0.3
            if var_name not in self._best_calibration_loss:
                self._best_calibration_loss[var_name] = float('inf')
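            # Grid-search the scale: try scale = s * abs_max for s in
            # [0.3, 1.0] with step 0.02, keeping the scale whose
            # quantize-dequantize reconstruction yields the smallest MSE.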
            while s <= 1.0:
                scale = s * abs_max_value
                s += 0.02
                bins = 2 ** (self._activation_bits - 1) - 1
                if self._onnx_format:
                    quant_var = np.clip(
                        np.round(var_tensor / scale * bins), -bins - 1, bins
                    )
                    quant_dequant_var = quant_var / bins * scale
                else:
                    quant_dequant_var = (
                        np.round(np.clip(var_tensor, 0.0, scale) / scale * bins)
                        / bins
                        * scale
                    )
                mse_loss = ((var_tensor - quant_dequant_var) ** 2).mean()
                if mse_loss <= self._best_calibration_loss[var_name]:
                    self._best_calibration_loss[var_name] = mse_loss
                    self._quantized_threshold[var_name] = scale

    def _sample_emd(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i])))
                            )
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i])))
                            )
                self._quantized_threshold[var_name] = abs_max_value
        _logger.info("EMD searching stage ...")
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = var_tensor.flatten()
            abs_max_value = float(np.max(np.abs(var_tensor)))
            abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
            s = 0.3
            if var_name not in self._best_calibration_loss:
                self._best_calibration_loss[var_name] = float('inf')
            while s <= 1.0:
                scale = s * abs_max_value
                s += 0.02
                bins = 2 ** (self._activation_bits - 1) - 1
                if self._onnx_format:
                    quant_var = np.clip(
                        np.round(var_tensor / scale * bins), -bins - 1, bins
                    )
                    quant_dequant_var = quant_var / bins * scale
                else:
                    quant_dequant_var = (
                        np.round(np.clip(var_tensor, 0.0, scale) / scale * bins)
                        / bins
                        * scale
                    )
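                # EMD-style proxy loss: match the first two moments (mean and
                # std) of the original tensor and its quant-dequant version.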
                emd_loss = np.abs(
                    np.mean(var_tensor) - np.mean(quant_dequant_var)
                ) + np.abs(np.std(var_tensor) - np.std(quant_dequant_var))
                if emd_loss <= self._best_calibration_loss[var_name]:
                    self._best_calibration_loss[var_name] = emd_loss
                    self._quantized_threshold[var_name] = scale

    def _sample_avg(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i])))
                            )
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i])))
                            )
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if var_name not in self._quantized_var_avg:
                self._quantized_var_avg[var_name] = []
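            # Record the mean of the per-sample abs-max values for this batch;
            # quantize() later averages these batch means into the final
            # threshold.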
            abs_avg_value = float(
                np.mean(
                    np.max(
                        np.abs(var_tensor.reshape(var_tensor.shape[0], -1)),
                        axis=(1),
                    )
                )
            )
            self._quantized_var_avg[var_name].append(abs_avg_value)

    def _sample_abs_max(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i])))
                            )
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i])))
                            )
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if (var_name not in self._quantized_threshold) or (
                abs_max_value > self._quantized_threshold[var_name]
            ):
                self._quantized_threshold[var_name] = abs_max_value

    def _sample_min_max(self):
        if self._quantized_var_min == {} and self._quantized_var_max == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    min_value = float(np.min(var_tensor))
                    max_value = float(np.max(var_tensor))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    min_value = []
                    max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            min_value.append(float(np.min(var_tensor[:, i])))
                            max_value.append(float(np.max(var_tensor[:, i])))
                    else:
                        for i in range(var_tensor.shape[0]):
                            min_value.append(float(np.min(var_tensor[i])))
                            max_value.append(float(np.max(var_tensor[i])))
                self._quantized_var_min[var_name] = min_value
                self._quantized_var_max[var_name] = max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if (var_name not in self._quantized_var_min) or (
                min_value < self._quantized_var_min[var_name]
            ):
                self._quantized_var_min[var_name] = min_value
            if (var_name not in self._quantized_var_max) or (
                max_value > self._quantized_var_max[var_name]
            ):
                self._quantized_var_max[var_name] = max_value

    def _sample_histogram(self):
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor_abs = np.abs(var_tensor)
            bins = self._sampling_act_histogram[var_name][1]
            hist, _ = np.histogram(var_tensor_abs, bins=bins)
            self._sampling_act_histogram[var_name][0] += hist

    def _sample_ptf(self):
        """
        The following code is modified from:
        https://github.com/megvii-research/FQ-ViT/
        """
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = utils.load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if (
                        self._weight_op_pairs[var_name]
                        in utils._channelwise_quant_axis1_ops
                    ):
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i])))
                            )
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i])))
                            )
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
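            # PTF: start from the int8 scale (abs_max / q_max) and also try
            # halving it one to three times; the candidate whose quant-dequant
            # L2 error is smallest wins.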
            q_max = 2 ** (self._activation_bits - 1) - 1
            scale8 = abs_max_value / q_max
            scale4 = scale8 / 2
            scale2 = scale4 / 2
            scale1 = scale2 / 2
            quant_dequant_var_scale1 = (
                np.clip(np.round(var_tensor / scale1), 0, q_max) * scale1
            )
            quant_dequant_var_scale2 = (
                np.clip(np.round(var_tensor / scale2), 0, q_max) * scale2
            )
            quant_dequant_var_scale4 = (
                np.clip(np.round(var_tensor / scale4), 0, q_max) * scale4
            )
            quant_dequant_var_scale8 = (
                np.clip(np.round(var_tensor / scale8), 0, q_max) * scale8
            )
            score1 = utils.l2_loss(var_tensor, quant_dequant_var_scale1)
            score2 = utils.l2_loss(var_tensor, quant_dequant_var_scale2)
            score4 = utils.l2_loss(var_tensor, quant_dequant_var_scale4)
            score8 = utils.l2_loss(var_tensor, quant_dequant_var_scale8)
            score = [score1, score2, score4, score8]
            mask = 2 ** score.index(min(score))
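            # mask is the chosen power-of-two factor (1, 2, 4 or 8), so
            # threshold = q_max * scale1 * mask recovers the matching abs-max.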
            scale = scale1 * mask
            threshold = q_max * scale
            self._quantized_threshold[var_name] = threshold

    def _save_input_threshold(self):
        '''
        Save input threshold to the quantized op.
        '''
        assert (
            self._algo == "min_max"
        ), "The algo should be min_max to save input threshold."
        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                if op.type in self._quantizable_op_type:
                    for var_name in utils._get_op_input_var_names(op):
                        assert var_name in self._quantized_var_min
                        assert var_name in self._quantized_var_max
                        op._set_attr(
                            var_name + ".min", self._quantized_var_min[var_name]
                        )
                        op._set_attr(
                            var_name + ".max", self._quantized_var_max[var_name]
                        )
                        op._set_attr("with_quant_attr", True)

    def _collect_activation_abs_min_max(self):
        '''
        Collect the abs_min and abs_max for all activations. When algo = KL
        or hist, get the min and max value, and then calculate the threshold.
        '''
        for var_name in self._quantized_act_var_name:
            var_tensor = utils.load_variable_data(self._scope, var_name)
            var_tensor = np.abs(var_tensor)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if var_name not in self._sampling_act_abs_min_max:
                self._sampling_act_abs_min_max[var_name] = [
                    min_value,
                    max_value,
                ]
            else:
                if min_value < self._sampling_act_abs_min_max[var_name][0]:
                    self._sampling_act_abs_min_max[var_name][0] = min_value
                if max_value > self._sampling_act_abs_min_max[var_name][1]:
                    self._sampling_act_abs_min_max[var_name][1] = max_value

    def _init_sampling_act_histogram(self):
        '''
        Based on the min/max value, init the sampling_act_histogram.
        '''
        for var_name in self._quantized_act_var_name:
            if var_name not in self._sampling_act_histogram:
                min_val = self._sampling_act_abs_min_max[var_name][0]
                max_val = self._sampling_act_abs_min_max[var_name][1]
                hist, hist_edges = np.histogram(
                    [], bins=self._histogram_bins, range=(min_val, max_val)
                )
                self._sampling_act_histogram[var_name] = [hist, hist_edges]

    def _calculate_kl_hist_threshold(self):
        '''
        Calculate the KL or hist threshold of quantized variables.
        '''
        _logger.info("Calculate {} threshold ...".format(self._algo))
        assert self._algo in ["KL", "hist"], "The algo should be KL or hist."

        # Abs_max threshold for weights
        for var_name in self._quantized_weight_var_name:
            weight_data = utils.load_variable_data(self._scope, var_name)
            if self._weight_quantize_type == "abs_max":
                weight_threshold = float(np.max(np.abs(weight_data)))
            elif self._weight_quantize_type == "channel_wise_abs_max":
                weight_threshold = []
                if (
                    self._weight_op_pairs[var_name]
                    in utils._channelwise_quant_axis1_ops
                ):
                    for i in range(weight_data.shape[1]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[:, i])))
                        )
                else:
                    for i in range(weight_data.shape[0]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[i])))
                        )
            self._quantized_var_threshold[var_name] = weight_threshold

        for var_name in self._quantized_act_var_name:
            hist, hist_edges = self._sampling_act_histogram[var_name]
            if self._algo == "KL":
                bin_width = hist_edges[1] - hist_edges[0]
                self._quantized_var_threshold[var_name] = cal_kl_threshold(
                    hist, bin_width, self._activation_bits
                )
            elif self._algo == "hist":
                self._quantized_var_threshold[
                    var_name
                ] = self._get_hist_scaling_factor(hist, hist_edges)

    def _update_program(self):
        '''
        Use QuantizationTransformPass and AddQuantDequantPass to insert
        fake_quantize, fake_dequantize and fake_quant_dequant op.
        Besides, save all threshold to the scale var node.
        '''
        _logger.info("Update the program ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)

        # use QuantizationTransformPass to insert fake_quant/fake_dequantize op
        major_quantizable_op_types = []
        for op_type in utils._weight_supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                major_quantizable_op_types.append(op_type)
        if not self._onnx_format:
            transform_pass = QuantizationTransformPass(
                scope=self._scope,
                place=self._place,
                weight_bits=self._weight_bits,
                activation_bits=self._activation_bits,
                activation_quantize_type=self._activation_quantize_type,
                weight_quantize_type=self._weight_quantize_type,
                quantizable_op_type=major_quantizable_op_types,
            )
        else:
            transform_pass = QuantizationTransformPassV2(
                scope=self._scope,
                place=self._place,
                weight_bits=self._weight_bits,
                activation_bits=self._activation_bits,
                activation_quantize_type=self._activation_quantize_type,
                weight_quantize_type=self._weight_quantize_type,
                quantizable_op_type=major_quantizable_op_types,
            )

        for sub_graph in graph.all_sub_graphs():
            # Insert fake_quant/fake_dequantize op must in test graph, so
            # set per graph's _for_test is True.
            sub_graph._for_test = True
            transform_pass.apply(sub_graph)

        # use AddQuantDequantPass to insert fake_quant_dequant op
        minor_quantizable_op_types = []
        for op_type in utils._act_supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                minor_quantizable_op_types.append(op_type)
        if not self._onnx_format:
            add_quant_dequant_pass = AddQuantDequantPass(
                scope=self._scope,
                place=self._place,
                quantizable_op_type=minor_quantizable_op_types,
            )
        else:
            add_quant_dequant_pass = AddQuantDequantPassV2(
                scope=self._scope,
                place=self._place,
                quantizable_op_type=minor_quantizable_op_types,
                is_full_quantized=True,
            )

        for sub_graph in graph.all_sub_graphs():
            sub_graph._for_test = True
            add_quant_dequant_pass.apply(sub_graph)

        # save threshold to scale var node
        if self._scale_dict is None:
            if self._algo in ["KL", "hist"]:
                scale_dict = self._quantized_var_threshold
            else:
                scale_dict = self._quantized_threshold

            if self._same_scale_tensor_list is not None:
                for tensor_list in self._same_scale_tensor_list:
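                    # Entries are plain tensor names or 'name#op#scalar'
                    # (op is '*' or '/'), which rescales that tensor's
                    # threshold before taking and after applying the group max.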
                    max_scale = None
                    tmp_tensor_list = []
                    for tensor_name in tensor_list:
                        if '#' in tensor_name:
                            real_tensor_name, opera, scalar = tensor_name.split(
                                '#'
                            )
                            if real_tensor_name not in scale_dict.keys():
                                continue
                            if opera == '*':
                                scale_dict[real_tensor_name] = float(
                                    scale_dict[real_tensor_name]
                                ) * float(scalar)
                            elif opera == '/':
                                scale_dict[real_tensor_name] = float(
                                    scale_dict[real_tensor_name]
                                ) / float(scalar)
                            max_scale = (
                                scale_dict[real_tensor_name]
                                if max_scale is None
                                else max(
                                    max_scale, scale_dict[real_tensor_name]
                                )
                            )
1189
                        else:
1190 1191
                            if tensor_name not in scale_dict.keys():
                                continue
1192 1193 1194 1195 1196
                            max_scale = (
                                scale_dict[tensor_name]
                                if max_scale is None
                                else max(max_scale, scale_dict[tensor_name])
                            )
1197 1198 1199 1200

                    for tensor_name in tensor_list:
                        if '#' in tensor_name:
                            real_tensor_name, opera, scalar = tensor_name.split(
1201 1202
                                '#'
                            )
1203 1204
                            if real_tensor_name not in scale_dict.keys():
                                continue
1205 1206
                            if opera == '*':
                                scale_dict[
1207 1208
                                    real_tensor_name
                                ] = max_scale / float(scalar)
1209 1210
                            elif opera == '/':
                                scale_dict[
1211 1212
                                    real_tensor_name
                                ] = max_scale * float(scalar)
1213
                        else:
1214 1215
                            if tensor_name not in scale_dict.keys():
                                continue
1216 1217 1218 1219
                            scale_dict[tensor_name] = max_scale
            self._scale_dict = scale_dict

        # Write each threshold into both scale variable spellings that the
        # inserted passes may have created for a tensor: "<name>@scale" and
        # "<name>.quant_dequant@scale".
        for key, val in self._scale_dict.items():
            utils.set_variable_data(
                self._scope,
                self._place,
                key + "@scale",
                np.array([val], dtype=np.float32),
            )
            utils.set_variable_data(
                self._scope,
                self._place,
                key + ".quant_dequant@scale",
                np.array([val], dtype=np.float32),
            )

        if not self._onnx_format:
            # apply QuantizationFreezePass, and obtain the final quant model
            if self._freeze_model:
                freeze_pass = QuantizationFreezePass(
                    scope=self._scope,
                    place=self._place,
                    bias_correction=self._bias_correction,
                    weight_bits=self._weight_bits,
                    round_type=self._round_type,
                    activation_bits=self._activation_bits,
                    weight_quantize_type=self._weight_quantize_type,
                    quantizable_op_type=major_quantizable_op_types,
                )

                for sub_graph in graph.all_sub_graphs():
                    sub_graph._for_test = True
                    freeze_pass.apply(sub_graph)
        else:
            quant_weight_pass = QuantWeightPass(self._scope, self._place)
            for sub_graph in graph.all_sub_graphs():
                sub_graph._for_test = True
                quant_weight_pass.apply(sub_graph)

        self._program = graph.to_program()

    def _save_output_threshold(self):
        '''
        Save output threshold to the quantized op.
        '''
        self._calibration_scales = {}

        def save_info(
            op_node, out_var_name, threshold_map, out_info_name, quantized_type
        ):
            assert (
                out_var_name in threshold_map
            ), "The output ({}) of {} node does not have threshold.".format(
                out_var_name, op_node.type
            )
            if self._onnx_format:
                # For easy extension, every var_node sets a dict to save its
                # quant parameters.
                self._calibration_scales[out_var_name] = {}
                self._calibration_scales[out_var_name]['scale'] = threshold_map[
                    out_var_name
                ]
            else:
                op_node._set_attr(out_info_name, threshold_map[out_var_name])
                op_node._set_attr("with_quant_attr", True)
                if op_node.type in self._quantizable_op_type:
                    op_node._set_attr("quantization_type", quantized_type)

        def analysis_and_save_info(op_node, out_var_name):
            argname_index = utils._get_output_name_index(op_node, out_var_name)
            assert argname_index is not None, (
                out_var_name + " is not the output of the op"
            )
            if self._algo == "KL":
                # For compatibility, we save output threshold by two methods.
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_threshold,
                    "out_threshold",
                    "post_kl",
                )
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_kl",
                )
            elif self._algo == "hist":
                # For compatibility, we save output threshold by two methods.
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_threshold,
                    "out_threshold",
                    "post_hist",
                )
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_hist",
                )
            elif self._algo in ["avg", "abs_max", "mse", "emd", "ptf"]:
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_threshold,
                    "out_threshold",
                    "post_" + str(self._algo),
                )
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_" + str(self._algo),
                )
            elif self._algo == "min_max":
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_min,
                    "out_min",
                    "post_min_max",
                )
                save_info(
                    op_node,
                    out_var_name,
                    self._quantized_var_max,
                    "out_max",
                    "post_min_max",
                )

        for block_id in range(len(self._program.blocks)):
            for op in self._program.blocks[block_id].ops:
                if op.type in (
                    self._quantizable_op_type + self._out_scale_op_list
                ):
                    out_var_names = utils._get_op_output_var_names(op)
                    for var_name in out_var_names:
                        analysis_and_save_info(op, var_name)

    def _collect_dynamic_quantize_op_threshold(self, target_ops_type):
        """
        Collect and save the weight threshold for dynamic quantize ops,
        such as lstm and gru.
        Args:
            target_ops_type(list): the op type of target ops
        Returns:
            None
        """

        target_ops = []
        for index in range(self._program.num_blocks):
            for op in self._program.block(index).ops:
                if op.type in target_ops_type:
                    target_ops.append(op)

        quantization_type = str("post_" + self._algo).lower()
        persistable_var_names = _all_persistable_var_names(self._program)
        for op in target_ops:
            for var_name in utils._get_op_input_var_names(op):
                if var_name in persistable_var_names:
                    var_data = utils.load_variable_data(self._scope, var_name)
                    threshold = float(np.max(np.abs(var_data)))
                    argname, index = utils._get_input_name_index(op, var_name)
                    op._set_attr(argname + str(index) + "_threshold", threshold)
                    op._set_attr("quantization_type", quantization_type)
                    op._set_attr("bit_length", self._weight_bits)
                    op._set_attr("with_quant_attr", True)

    def _get_hist_scaling_factor(self, hist, hist_edges):
        '''
        Using the hist method to get the scaling factor.
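
        For example, with hist_percent = 0.99999, the returned threshold is
        the center of the first bin at which the cumulative density reaches
        99.999% of all samples, i.e. (hist_index - 0.5) * bin_width.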
        '''
        threshold_rate = self._hist_percent
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return (hist_index - 0.5) * bin_width


class PostTrainingQuantizationProgram(PostTrainingQuantization):
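    '''
    Run post training quantization on a Program that is already in memory,
    together with its feed and fetch lists, instead of loading the model
    from disk as the parent class does.
    '''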
    def __init__(
        self,
        executor,
        program,
        feed_list=None,
        fetch_list=None,
        scope=None,
        batch_generator=None,
        sample_generator=None,
        data_loader=None,
        batch_size=10,
        batch_nums=None,
        algo="KL",
        hist_percent=0.99999,
        quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
        round_type='round',
        learning_rate=0.001,
        is_full_quantize=False,
        bias_correction=False,
        activation_bits=8,
        weight_bits=8,
        activation_quantize_type='range_abs_max',
        weight_quantize_type='channel_wise_abs_max',
        onnx_format=False,
        freeze_model=True,
        optimize_model=False,
        is_use_cache_file=False,
        skip_tensor_list=None,
        same_scale_tensor_list=None,
        cache_dir=None,
        scale_dict=None,
        return_graph=True,
    ):
        super().__init__(
            executor,
            scope,
            None,
            None,
            None,
            batch_generator,
            sample_generator,
            data_loader,
            batch_size,
            batch_nums,
            algo,
            hist_percent,
            quantizable_op_type,
            round_type,
            learning_rate,
            is_full_quantize,
            bias_correction,
            activation_bits,
            weight_bits,
            activation_quantize_type,
            weight_quantize_type,
            onnx_format,
            freeze_model,
            optimize_model,
            is_use_cache_file,
            skip_tensor_list,
            same_scale_tensor_list,
            cache_dir,
            scale_dict,
            return_graph,
        )
        self.FLAG = False
        self._program = program
        if self._program is not None:
            self.FLAG = True
        assert feed_list is not None, "Feed list should not be None."
        assert fetch_list is not None, "Fetch list should not be None."
        self._feed_list = feed_list
        self._fetch_list = fetch_list


class WeightQuantization(object):
    _supported_quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul']
    _supported_weight_quantize_type = ['channel_wise_abs_max', 'abs_max']

    def __init__(self, model_dir, model_filename=None, params_filename=None):
        '''
        This class quantizes the weight of some ops to reduce the size of the
        model or to improve inference performance.

        Args:
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
        '''
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename

    def quantize_weight_to_int(
        self,
        save_model_dir,
        save_model_filename=None,
        save_params_filename=None,
        quantizable_op_type=["conv2d", "mul"],
        weight_bits=8,
        weight_quantize_type="channel_wise_abs_max",
        generate_test_model=False,
        threshold_rate=0.0,
    ):
        '''
        In order to reduce the size of the model, this api quantizes the
        weight of some ops from float32 to int8/16. In the inference stage,
        the quantized weight will be dequantized to float32 again.

        Args:
            save_model_dir(str): The path to save the quantized model.
            save_model_filename(str, optional): The name of file to
                save the inference program. If it is None, the default
                filename '__model__' will be used. Default is 'None'.
            save_params_filename(str, optional): The name of file to
                save all parameters. If it is None, parameters are
                saved in separate files. If it is not None, all
                parameters are saved in a single binary file.
            quantizable_op_type(list[str], optional): The list of ops
                that will be quantized, and the quantized ops should be
                contained in ["conv2d", "depthwise_conv2d", "mul"].
                Default is ["conv2d", "mul"].
            weight_bits(int, optional): The bits for the quantized weight,
                and it should be 8 or 16. Default is 8.
            weight_quantize_type(str, optional): quantization type for weights,
                support 'channel_wise_abs_max' and 'abs_max'. Set it as
                'channel_wise_abs_max' for better accuracy.
            generate_test_model(bool, optional): If generate_test_model
                is True, it also saves a fake quantized model, in which
                the weights are quantized and then dequantized. We can use
                PaddlePaddle to load the fake quantized model and test the
                accuracy on GPU or CPU.
            threshold_rate(float, optional): This api uses the abs_max
                method to quantize the weight from float32 to int8/16, and
                the abs max value is important for the quantization error.
                When the abs_max value is far away from the center of the
                numerical distribution, we can set threshold_rate between
                1e-6 and 1e-8, so the abs max
                value will be optimized. Default is 0.0.
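
        Examples:
            A minimal usage sketch. The model path is illustrative, and the
            import path assumes this class is exported from
            paddle.fluid.contrib.slim.quantization:

            .. code-block:: python

                from paddle.fluid.contrib.slim.quantization import (
                    WeightQuantization,
                )

                weight_quant = WeightQuantization(model_dir='./fp32_model')
                weight_quant.quantize_weight_to_int(
                    save_model_dir='./quant_model',
                    quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'],
                    weight_bits=8,
                    weight_quantize_type='channel_wise_abs_max',
                )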
        '''
        for op_type in quantizable_op_type:
            assert op_type in self._supported_quantizable_op_type, (
                "Input error:"
                + op_type
                + " is not supported for weight quantization."
            )
        assert weight_bits in [
            8,
            16,
        ], "Input error: weight_bits should be 8 or 16."
        assert (
            weight_quantize_type in self._supported_weight_quantize_type
        ), "Input error: weight_quantize_type should in {}".format(
            self._supported_weight_quantize_type
        )

        quantized_model_dir = os.path.join(save_model_dir, "quantized_model")
        self._quantize_weight_to_int(
            quantized_model_dir,
            save_model_filename,
            save_params_filename,
            quantizable_op_type,
            weight_bits,
            weight_quantize_type,
            False,
            threshold_rate,
        )

        if generate_test_model:
            test_model_dir = os.path.join(save_model_dir, "test_model")
            self._quantize_weight_to_int(
                test_model_dir,
                save_model_filename,
                save_params_filename,
                quantizable_op_type,
                weight_bits,
                weight_quantize_type,
                True,
                threshold_rate,
            )

    def convert_weight_to_fp16(self, save_model_dir):
        """
        Convert all presistable vars from fp32 to fp16.
        Note that, this api only changes the data type of variables in
1598
        __params__ file, and the __model__ file remains unchanged.
1599 1600 1601 1602 1603 1604 1605 1606 1607

        Args:
            save_model_dir(str): The path to save the fp16 model.
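
        Examples:
            A minimal usage sketch (paths and file names are illustrative):

            .. code-block:: python

                weight_quant = WeightQuantization(
                    model_dir='./fp32_model',
                    params_filename='__params__',
                )
                weight_quant.convert_weight_to_fp16(save_model_dir='./fp16_model')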
        """

        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [infer_program, feed_list, fetch_list] = io.load_inference_model(
            dirname=self._model_dir,
            executor=exe,
            model_filename=self._model_filename,
            params_filename=self._params_filename,
        )

        # Clone and save fp16 weights
        save_program = framework.Program()
        save_block = save_program.global_block()
        save_var_map = {}

        for var in infer_program.list_vars():
            if (
                (var.type == core.VarDesc.VarType.RAW)
                or (not var.persistable)
                or (var.name in ['feed', 'fetch'])
                or (var.dtype != core.VarDesc.VarType.FP32)
            ):
                continue

            new_var = save_block._clone_variable(var)
            if self._params_filename is not None:
                save_var_map[new_var.name] = new_var
            else:
                save_file_path = os.path.join(
                    os.path.normpath(save_model_dir), new_var.name
                )
                save_block.append_op(
                    type='save',
                    inputs={'X': [new_var]},
                    outputs={},
                    attrs={
                        'file_path': os.path.normpath(save_file_path),
                        'save_as_fp16': True,
                    },
                )

        if self._params_filename is not None:
            save_var_list = []
            for name in sorted(save_var_map.keys()):
                save_var_list.append(save_var_map[name])

            saved_params_var = save_block.create_var(
                type=core.VarDesc.VarType.RAW,
                name=unique_name.generate("saved_params"),
            )
            saved_params_var.desc.set_persistable(True)

            save_path = os.path.join(
                os.path.normpath(save_model_dir), self._params_filename
            )
            save_block.append_op(
                type='save_combine',
                inputs={'X': save_var_list},
                outputs={'Y': saved_params_var},
                attrs={'file_path': save_path, 'save_as_fp16': True},
            )

        save_program._sync_with_cpp()
        exe.run(save_program)

        # Copy model
        model_filename = (
            "__model__"
            if self._model_filename is None
            else self._model_filename
        )
        src_model = os.path.join(self._model_dir, model_filename)
        dest_model = os.path.join(save_model_dir, model_filename)
        shutil.copyfile(src_model, dest_model)

    def _quantize_weight_to_int(
        self,
        save_model_dir,
        save_model_filename,
        save_params_filename,
        quantizable_op_type,
        weight_bits,
        weight_quantize_type,
        for_test,
        threshold_rate,
    ):
        """
        Generate quantized model or fake quantized model.
        """
        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [program, feed_list, fetch_list] = io.load_inference_model(
            dirname=self._model_dir,
            executor=exe,
            model_filename=self._model_filename,
            params_filename=self._params_filename,
        )

        quantized_ops = []
        for index in range(program.num_blocks):
            block = program.block(index)
            for op in block.ops:
                if op.type in quantizable_op_type:
                    quantized_ops.append(op)

        # Quantize weights
        persistable_var_names = _all_persistable_var_names(program)
        for op in quantized_ops:
            for var_name in op.input_arg_names:
                if var_name in persistable_var_names:
                    if weight_quantize_type == "abs_max":
                        self._weight_abs_max_quantization(
                            scope,
                            place,
                            weight_bits,
                            threshold_rate,
                            op,
                            var_name,
                            for_test,
                        )
                    elif weight_quantize_type == "channel_wise_abs_max":
                        self._weight_channel_wise_abs_max_quantization(
                            scope, place, weight_bits, op, var_name, for_test
                        )

        io.save_inference_model(
            dirname=save_model_dir,
            feeded_var_names=feed_list,
            target_vars=fetch_list,
            executor=exe,
            main_program=program,
            model_filename=save_model_filename,
            params_filename=save_params_filename,
        )

    def _weight_abs_max_quantization(
        self, scope, place, weight_bits, threshold_rate, op, var_name, for_test
    ):
        '''
        Use abs_max method to quantize weight.
        '''
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
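        # e.g. for weight_bits == 8, quantize_range is 127, so a weight tensor
        # whose abs max is 0.5 maps to scale = 0.5 / 127 with int8 values in
        # [-127, 127].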
        weight_data = utils.load_variable_data(scope, var_name)
        if abs(threshold_rate) < 1e-10:
            threshold_value = np.max(np.abs(weight_data))
        else:
            threshold_value = self._calculate_threshold(
                weight_data, threshold_rate
            )
            weight_data[weight_data > threshold_value] = threshold_value
            weight_data[weight_data < -threshold_value] = -threshold_value
        scale = threshold_value / quantize_range
        quantized_weight_data = np.around(weight_data / scale).astype(
            save_weight_dtype
        )

        # Set weight data
        if not for_test:
            utils.set_variable_data(
                scope, place, var_name, quantized_weight_data
            )
        else:
            dequantized_weight_data = (quantized_weight_data * scale).astype(
                np.float32
            )
            utils.set_variable_data(
                scope, place, var_name, dequantized_weight_data
            )

        # Save info
        op._set_attr('quantization_type', 'post_weight_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", [scale])  # Save as list
        op._set_attr("with_quant_attr", True)

    def _weight_channel_wise_abs_max_quantization(
        self, scope, place, weight_bits, op, var_name, for_test
    ):
        '''
        Use channel_wise_abs_max method to quantize weight.
        '''
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
        weight_data = utils.load_variable_data(scope, var_name)
        if op.type == "mul":
            scales, quantized_weight_data = self._mul_channel_wise_quantization(
                weight_data, quantize_range, save_weight_dtype
            )
        elif op.type in ["conv2d", "depthwise_conv2d"]:
            (
                scales,
                quantized_weight_data,
            ) = self._conv_channel_wise_quantization(
                weight_data, quantize_range, save_weight_dtype
            )
        else:
            _logger.error(op.type + " is not supported by weight quantization")

        # Set weight data
        if not for_test:
            utils.set_variable_data(
                scope, place, var_name, quantized_weight_data
            )
        else:
            if op.type == "mul":
                dequantized_weight_data = self._mul_channel_wise_dequantization(
                    quantized_weight_data, scales
                )
            elif op.type in ["conv2d", "depthwise_conv2d"]:
                dequantized_weight_data = (
                    self._conv_channel_wise_dequantization(
                        quantized_weight_data, scales
                    )
                )
            else:
                _logger.error(
                    op.type + " is not supported by weight quantization"
                )
            utils.set_variable_data(
                scope, place, var_name, dequantized_weight_data
            )

        # Save info
        op._set_attr('quantization_type', 'post_weight_channel_wise_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", scales)
        op._set_attr("with_quant_attr", True)

    def _conv_channel_wise_quantization(
        self, weight_data, quantize_range, save_weight_dtype
    ):
        '''
        Get channel wise scale for the weights of conv2d and depthwise_conv2d,
        and quantize the weights.
        '''
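        # Conv weights are laid out [out_channels, in_channels, kh, kw], so one
        # scale is computed per output channel (axis 0).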
        scales = []
        quantized_weight_data = np.zeros_like(
            weight_data, dtype=save_weight_dtype
        )
        channel_num = weight_data.shape[0]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[i] = np.around(weight_data[i] / scale).astype(
                save_weight_dtype
            )
        return scales, quantized_weight_data

    def _conv_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For conv2d and depthwise_conv2d, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(
            quantized_weight_data, dtype=np.float32
        )
        for i in range(len(scales)):
            dequantized_weight_data[i] = (
                quantized_weight_data[i] * scales[i]
            ).astype(np.float32)
        return dequantized_weight_data

    def _mul_channel_wise_quantization(
        self, weight_data, quantize_range, save_weight_dtype
    ):
        '''
        Get channel wise scale for the weights of mul,
        and quantize the weights.
        '''
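        # A mul weight is laid out [in_features, out_features], so one scale
        # is computed per output column (the last axis).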
        scales = []
        quantized_weight_data = np.zeros_like(
            weight_data, dtype=save_weight_dtype
        )
        channel_num = weight_data.shape[-1]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[:, i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[:, i] = np.around(
                weight_data[:, i] / scale
            ).astype(save_weight_dtype)
        return scales, quantized_weight_data

    def _mul_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For mul, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(
            quantized_weight_data, dtype=np.float32
        )
        for i in range(len(scales)):
            dequantized_weight_data[:, i] = (
                quantized_weight_data[:, i] * scales[i]
            ).astype(np.float32)
        return dequantized_weight_data

    def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000):
        input_abs = np.abs(input)
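        # Build a histogram of |input| and return the bin edge at which the
        # cumulative density first reaches (1 - threshold_rate); values above
        # it are treated as outliers and clipped by the caller.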
        hist, hist_edges = np.histogram(
            input_abs, bins=histogram_bins, range=(0, np.max(input_abs))
        )
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= 1.0 - threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return hist_index * bin_width