#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import re
import logging
import numpy as np
import shutil
from .... import io
from .... import core
from .... import framework
from .... import unique_name
from ....executor import global_scope, Executor
from ....framework import IrGraph
from ....log_helper import get_logger
from .quantization_pass import QuantizationTransformPass
from .quantization_pass import QuantizationFreezePass
from .quantization_pass import AddQuantDequantPass
from .quantization_pass import _out_scale_op_list
from .quantization_pass import _get_op_input_var_names
from .quantization_pass import _get_op_output_var_names
from .quantization_pass import _get_output_name_index
from .quantization_pass import _get_input_name_index
from .quantization_pass import _channelwise_quant_axis1_ops
from .cal_kl_threshold import cal_kl_threshold

__all__ = ['PostTrainingQuantization', 'WeightQuantization']

_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')


def _load_variable_data(scope, var_name):
    '''
    Load variable value from scope
    '''
    var_node = scope.find_var(var_name)
    assert var_node is not None, \
        "Cannot find " + var_name + " in scope."
    return np.array(var_node.get_tensor())


def _set_variable_data(scope, place, var_name, np_value):
    '''
    Set the value of the var node by name, if the node exists.
    '''
    assert isinstance(np_value, np.ndarray), \
        'The type of value should be numpy array.'
    var_node = scope.find_var(var_name)
    if var_node is not None:
        tensor = var_node.get_tensor()
        tensor.set(np_value, place)


def _all_persistable_var_names(program):
    persistable_var_names = []
    for var in program.list_vars():
        if var.persistable:
            persistable_var_names.append(var.name)
    return persistable_var_names


def _remove_unused_var_nodes(graph):
    all_used_vars = set()
    ops = graph.all_op_nodes()
    for op_node in ops:
        for input_node in op_node.inputs:
            all_used_vars.add(input_node)
        for output_node in op_node.outputs:
            all_used_vars.add(output_node)

    all_used_vars = {n.node for n in all_used_vars}
    all_unused_vars = {
        n
        for n in filter(lambda node: node.node not in all_used_vars,
                        graph.all_var_nodes())
    }
    graph.safe_remove_nodes(all_unused_vars)
    return graph


def _remove_ctrl_vars(graph):
    remove_ctr_vars = set()
    for node in graph.all_var_nodes():
        if node.is_ctrl_var():
            remove_ctr_vars.add(node)
    graph.safe_remove_nodes(remove_ctr_vars)
    return graph


def _apply_pass(scope,
                graph,
                pass_name,
                attrs=None,
                attr_values=None,
                debug=False):
    ir_pass = core.get_pass(pass_name)
    cpp_graph = graph.graph
    if not cpp_graph.has('__param_scope__'):
        cpp_graph.set_not_owned('__param_scope__', scope)
    if attrs:
        assert attr_values and len(attrs) == len(
            attr_values), "Different number of pass attributes and their values."
        for attr, value in zip(attrs, attr_values):
            ir_pass.set(attr, value)
    ir_pass.apply(cpp_graph)
    if debug:
        graph.draw('.', 'qat_fp32_{}'.format(pass_name), graph.all_op_nodes())
    _remove_unused_var_nodes(graph)
    return graph


class PostTrainingQuantization(object):
    """
    Utilizing post training quantization methon to quantize the FP32 model,
    and it uses calibrate data to get the quantization information for all 
    quantized variables.
    """

    def __init__(self,
                 executor=None,
                 scope=None,
                 model_dir=None,
                 model_filename=None,
                 params_filename=None,
                 batch_generator=None,
                 sample_generator=None,
                 batch_size=10,
                 batch_nums=None,
                 algo="KL",
                 hist_percent=0.99999,
                 quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                 is_full_quantize=False,
                 bias_correction=False,
                 activation_bits=8,
                 weight_bits=8,
                 activation_quantize_type='range_abs_max',
                 weight_quantize_type='channel_wise_abs_max',
                 optimize_model=False,
                 is_use_cache_file=False,
                 cache_dir=None):
        '''
        Constructor.

        Args:
            executor(fluid.Executor): The executor to load, run and save the
                quantized model.
            scope(fluid.Scope, optional): The scope of the program, use it to load
                and save variables. If scope=None, get scope by global_scope().
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
            batch_generator(Python Generator): The batch generator provides
                calibration data for DataLoader, and it returns a batch every
                time. Note that only one of sample_generator and batch_generator
                should be set. Besides, batch_generator supports lod tensor.
            sample_generator(Python Generator): The sample generator provides
                calibration data for DataLoader, and it only returns a sample
                every time. Note that only one of sample_generator and
                batch_generator should be set. Besides, sample_generator does
                not support lod tensor.
            batch_size(int, optional): The batch size of DataLoader. Default is 10.
            batch_nums(int, optional): If batch_nums is not None, the number of
                calibration samples is batch_size*batch_nums. If batch_nums is None,
                use all data provided by sample_generator as calibration data.
            algo(str, optional): If algo='KL', use the KL-divergence method to
                get the KL threshold for quantized activations and get the abs_max
                value for quantized weights. If algo='abs_max', get the abs max
                value for activations and weights. If algo='min_max', get the min
                and max value for quantized activations and weights. If algo='avg',
                get the average value among the max values for activations. If
                algo='hist', get the value of 'hist_percent' quantile as the threshold.
                If algo='mse', get the value which makes the quantization mse loss
                minimal. Default is 'KL'.
            hist_percent(float, optional): The percentile threshold of the 'hist'
                algo for activations. Default is 0.99999.
            quantizable_op_type(list[str], optional): List the type of ops
                that will be quantized. Default is ["conv2d", "depthwise_conv2d",
                "mul"].
            is_full_quantize(bool, optional): If set is_full_quantize as True,
                apply quantization to all supported quantizable op types. If set
                is_full_quantize as False, only apply quantization to the op types
                according to the input quantizable_op_type.
            bias_correction(bool, optional): If set as True, use the bias correction
                method of https://arxiv.org/abs/1810.05723. Default is False.
            activation_bits(int): quantization bit number for activation.
            weight_bits(int, optional): quantization bit number for weights.
            activation_quantize_type(str): quantization type for activation,
                now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
                This param only specifies the fake ops in saving quantized model.
                If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
                obtained by post training quantization in fake ops. Note that, if it
                is 'abs_max', the scale will not be saved in fake ops.
            weight_quantize_type(str): quantization type for weights,
                support 'abs_max' and 'channel_wise_abs_max'. This param only specifies
                the fake ops in saving quantized model, and we save the scale obtained
                by post training quantization in fake ops. Compared to 'abs_max',
                the model accuracy is usually higher when it is 'channel_wise_abs_max'.
            optimize_model(bool, optional): If set optimize_model as True, it applies
                some passes to the model before quantization, and it supports the
                `conv2d/depthwise_conv2d + bn` pass so far. Some targets require the
                weights to be quantized by the tensor-wise method, which means the
                weight scales for all channels are the same. However, if we fuse
                `conv2d/depthwise_conv2d + bn`, the weight scales for all channels
                will be different. To address this problem, fuse the pattern before
                quantization. Default is False.
            is_use_cache_file(bool, optional): This param is deprecated.
            cache_dir(str, optional): This param is deprecated.
        Returns:
            None

        Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization

            exe = fluid.Executor(fluid.CPUPlace())
            model_dir = path/to/fp32_model_params
            # set model_filename as None when the filename is __model__,
            # otherwise set it as the real filename
            model_filename = None
            # set params_filename as None when all parameters were saved in
            # separate files, otherwise set it as the real filename
            params_filename = None
            save_model_path = path/to/save_model_path
            # prepare the sample generator according to the model, and the
            # sample generator must return a sample every time. The reference
            # document: https://www.paddlepaddle.org.cn/documentation/docs/zh
            # /user_guides/howto/prepare_data/use_py_reader.html
            sample_generator = your_sample_generator
            batch_size = 10
            batch_nums = 10
            algo = "KL"
            quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"]
            ptq = PostTrainingQuantization(
                        executor=exe,
                        sample_generator=sample_generator,
                        model_dir=model_dir,
                        model_filename=model_filename,
                        params_filename=params_filename,
                        batch_size=batch_size,
                        batch_nums=batch_nums,
                        algo=algo,
                        quantizable_op_type=quantizable_op_type)
            ptq.quantize()
            ptq.save_quantized_model(save_model_path)
        '''

        self._support_activation_quantize_type = [
            'range_abs_max', 'moving_average_abs_max', 'abs_max'
        ]
        self._support_weight_quantize_type = ['abs_max', 'channel_wise_abs_max']
        self._support_algo_type = [
            'KL', 'hist', 'avg', 'mse', 'abs_max', 'min_max'
        ]
        self._dynamic_quantize_op_type = ['lstm']
        self._support_quantize_op_type = \
            list(set(QuantizationTransformPass._supported_quantizable_op_type +
                AddQuantDequantPass._supported_quantizable_op_type +
                self._dynamic_quantize_op_type))

        # Check inputs
        assert executor is not None, "The executor cannot be None."
        assert model_dir is not None, "The model_dir cannot be None."
        assert any(gen is not None for gen in [sample_generator,
            batch_generator]), "The sample_generator and batch_generator " \
            "cannot be None at the same time."
        assert batch_size > 0, "The batch_size should be greater than 0."
        assert algo in self._support_algo_type, \
            "The algo should be KL, hist, mse, avg, abs_max or min_max."
        assert activation_quantize_type in self._support_activation_quantize_type, \
            "The activation_quantize_type ({}) should be in ({}).".format(
            activation_quantize_type, self._support_activation_quantize_type)
        assert weight_quantize_type in self._support_weight_quantize_type, \
            "The weight_quantize_type ({}) should be in ({}).".format(
            weight_quantize_type, self._support_weight_quantize_type)

        # Save input params
        self._bias_correction = bias_correction
        self._executor = executor
        self._scope = global_scope() if scope is None else scope
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename
        self._sample_generator = sample_generator
        self._batch_generator = batch_generator
        self._batch_size = batch_size
        self._batch_nums = batch_nums
        self._algo = algo
        self._hist_percent = hist_percent
        self._activation_bits = activation_bits
        self._weight_bits = weight_bits
        self._activation_quantize_type = activation_quantize_type
        self._weight_quantize_type = weight_quantize_type
        self._is_full_quantize = is_full_quantize
        if is_full_quantize:
            self._quantizable_op_type = self._support_quantize_op_type
        else:
            self._quantizable_op_type = quantizable_op_type
            for op_type in self._quantizable_op_type:
                assert op_type in self._support_quantize_op_type, \
                    op_type + " is not supported for quantization."
        self._optimize_model = optimize_model

        # Define variables
        self._place = self._executor.place
        self._program = None
        self._feed_list = None
        self._fetch_list = None
        self._data_loader = None

        self._out_scale_op_list = _out_scale_op_list
        self._quantized_weight_var_name = set()
        self._quantized_act_var_name = set()
        self._weight_op_pairs = {}
        # The vars for algo = KL or hist
        self._sampling_act_abs_min_max = {}
        self._sampling_act_histogram = {}
        self._sampling_data = {}
        self._quantized_var_threshold = {}
        self._histogram_bins = 2048
        # The vars for algo = min_max
        self._quantized_var_min = {}
        self._quantized_var_max = {}
        # The vars for algo = avg
        self._quantized_var_avg = {}
        # The best loss of algo = mse
        self._best_mse_loss = {}
        # The threshold for algo = abs_max, mse or avg
        self._quantized_threshold = {}

    def quantize(self):
        '''
        Load the FP32 model, and use the calibration data to run the forward
        stage. Based on the sampled data, we can get the quantization
        information, and obtain the final quantized model.

        Args:
            None
        Returns:
            the program of the quantized model.
        '''
        self._load_model_data()
        self._collect_target_varnames()
        self._set_activation_persistable()

        if self._algo in ["KL", "hist"]:
            _logger.info("Preparation stage ...")
            batch_id = 0
            for data in self._data_loader():
                self._executor.run(program=self._program,
                                   feed=data,
                                   fetch_list=self._fetch_list,
                                   return_numpy=False,
                                   scope=self._scope)
                self._collect_activation_abs_min_max()
                if batch_id % 5 == 0:
                    _logger.info("Run batch: " + str(batch_id))
                batch_id += 1
                if self._batch_nums and batch_id >= self._batch_nums:
                    break
            _logger.info("Finish preparation stage, all batch:" + str(batch_id))
            self._init_sampling_act_histogram()

        _logger.info("Sampling stage ...")
        batch_id = 0
        for data in self._data_loader():
            self._executor.run(program=self._program,
                               feed=data,
                               fetch_list=self._fetch_list,
                               return_numpy=False,
                               scope=self._scope)
            self._sampling()
            if batch_id % 5 == 0:
                _logger.info("Run batch: " + str(batch_id))
            batch_id += 1
            if self._batch_nums and batch_id >= self._batch_nums:
                break
        _logger.info("Finish sampling stage, all batch: " + str(batch_id))
        self._reset_activation_persistable()
        if self._algo == 'avg':
            for var_name in self._quantized_act_var_name:
                self._quantized_threshold[var_name] = \
                    np.array(self._quantized_var_avg[var_name]).mean()
        if self._algo in ["KL", "hist"]:
            self._calculate_kl_hist_threshold()
        if self._algo in ["KL", "abs_max", "hist", "avg", "mse"]:
            self._update_program()
        else:
            self._save_input_threhold()

        self._save_output_threshold()
        if any(op_type in self._quantizable_op_type
               for op_type in self._dynamic_quantize_op_type):
            self._collect_dynamic_quantize_op_threshold(
                self._dynamic_quantize_op_type)
        return self._program

    def save_quantized_model(self,
                             save_model_path,
                             model_filename=None,
                             params_filename=None):
        '''
        Save the quantized model to the disk.

        Args:
            save_model_path(str): The path to save the quantized model.
            model_filename(str, optional): If the model_filename is None,
                save the model to '__model__'. Otherwise, save the model
                to the specified filename. Default: None.
            params_filename(str, optional): If the params_filename is None,
                save params to separate files. Otherwise, save all params
                to the specified filename.
        Returns:
            None
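
        Examples:
        .. code-block:: python
            # A minimal sketch: assumes `ptq` is a PostTrainingQuantization
            # instance whose quantize() method has already been called; the
            # save path below is a placeholder.
            ptq.save_quantized_model('path/to/save_model_path')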
        '''
        io.save_inference_model(
            dirname=save_model_path,
            model_filename=model_filename,
            params_filename=params_filename,
            feeded_var_names=self._feed_list,
            target_vars=self._fetch_list,
            executor=self._executor,
            main_program=self._program)
        _logger.info("The quantized model is saved in " + save_model_path)

    def _load_model_data(self):
        '''
        Load model and set data loader.
        '''
        _logger.info("Load model and set data loader ...")
        [self._program, self._feed_list, self._fetch_list] = \
            io.load_inference_model(dirname=self._model_dir,
                                    executor=self._executor,
                                    model_filename=self._model_filename,
                                    params_filename=self._params_filename)

        if self._program.num_blocks > 1:
            _logger.error("The post training quantization requires that the "
                          "program only has one block.")

        if self._optimize_model:
            self._optimize_fp32_model()

        feed_vars = [framework._get_var(str(var_name), self._program) \
            for var_name in self._feed_list]
        self._data_loader = io.DataLoader.from_generator(
            feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True)
        if self._sample_generator is not None:
            self._data_loader.set_sample_generator(
                self._sample_generator,
                batch_size=self._batch_size,
                drop_last=True,
                places=self._place)
        elif self._batch_generator is not None:
            self._data_loader.set_batch_generator(
                self._batch_generator, places=self._place)

    def _optimize_fp32_model(self):
        '''
        Fuse the `conv2d/depthwise_conv2d + bn` patterns in the FP32 model.
        '''
        _logger.info("Optimize FP32 model ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)
        graph = _remove_ctrl_vars(graph)
        graph = _apply_pass(self._scope, graph, 'conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph, 'conv_eltwiseadd_bn_fuse_pass')
        graph = _apply_pass(self._scope, graph,
                            'depthwise_conv_eltwiseadd_bn_fuse_pass')

        self._program = graph.to_program()

    def _collect_target_varnames(self):
        '''
        Collect the variable names for sampling, and set activation
        variables to be persistable.
        '''
        # TODO(juncaipeng), consider the name_scope of skip_quant
        _logger.info("Collect quantized variable names ...")

        def collect_var_name(var_name_list, persistable_var_names, op_type):
            for var_name in var_name_list:
                if var_name in persistable_var_names:
                    self._quantized_weight_var_name.add(var_name)
                    self._weight_op_pairs[var_name] = op_type
                else:
                    self._quantized_act_var_name.add(var_name)

        persistable_var_names = _all_persistable_var_names(self._program)
        for op in self._program.global_block().ops:
            op_type = op.type
            if self._is_full_quantize and \
                op_type not in self._quantizable_op_type:
                _logger.warning(op_type + " is not supported for quantization.")
            # For quantized ops, sample inputs and outputs
            if op_type in self._quantizable_op_type:
                collect_var_name(
                    _get_op_input_var_names(op), persistable_var_names, op_type)
                collect_var_name(
                    _get_op_output_var_names(op), persistable_var_names,
                    op_type)
            # For other ops, only sample the output scale
            elif op_type in self._out_scale_op_list:
                collect_var_name(
                    _get_op_output_var_names(op), persistable_var_names,
                    op_type)

    def _set_activation_persistable(self):
        '''
        Set activation variables to be persistable, so that we can obtain
        the tensor data in the sampling stage.
        '''
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = True

    def _reset_activation_persistable(self):
        '''
        Reset activations to be not persistable.
        '''
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = False

    def _sampling(self):
        '''
        Sample the min/max, abs_max or histogram in every iteration.
        '''
        if self._algo == "abs_max":
            self._sample_abs_max()
        elif self._algo == "avg":
            self._sample_avg()
        elif self._algo == "min_max":
            self._sample_min_max()
        elif self._algo == "mse":
            self._sample_mse()
        elif self._algo in ["KL", "hist"]:
            self._sample_histogram()

    def _sample_mse(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = _load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in _channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value
        _logger.info("MSE searching stage ...")
        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            var_tensor = var_tensor.flatten()
            abs_max_value = float(np.max(np.abs(var_tensor)))
            s = 0.3
            if var_name not in self._best_mse_loss:
                self._best_mse_loss[var_name] = float('inf')
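            # Grid-search candidate scales in [0.3, 1.0] * abs_max (step 0.02)
            # and keep the scale that minimizes the MSE between the tensor and
            # its clipped quantize-dequantize reconstruction.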
            while s <= 1.0:
                scale = s * abs_max_value
                s += 0.02
                bins = 2**(self._activation_bits - 1) - 1
                quant_dequant_var = np.round(
                    np.clip(var_tensor, 0.0, scale) / scale *
                    bins) / bins * scale
                mse_loss = ((var_tensor - quant_dequant_var)**2).mean()
                if mse_loss <= self._best_mse_loss[var_name]:
                    self._best_mse_loss[var_name] = mse_loss
                    self._quantized_threshold[var_name] = scale

    def _sample_avg(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = _load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in _channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value

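        # For each batch, record the mean over samples of each activation's
        # per-sample abs-max; quantize() later averages these per-batch values
        # into the final threshold.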
        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if var_name not in self._quantized_var_avg:
                self._quantized_var_avg[var_name] = []
            abs_avg_value = float(np.mean(np.max(
                np.abs(var_tensor.reshape(var_tensor.shape[0], -1)), axis=1)))
            self._quantized_var_avg[var_name].append(abs_avg_value)

    def _sample_abs_max(self):
        if self._quantized_threshold == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = _load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    abs_max_value = float(np.max(np.abs(var_tensor)))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    abs_max_value = []
                    if self._weight_op_pairs[
                            var_name] in _channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[:, i]))))
                    else:
                        for i in range(var_tensor.shape[0]):
                            abs_max_value.append(
                                float(np.max(np.abs(var_tensor[i]))))
                self._quantized_threshold[var_name] = abs_max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            abs_max_value = float(np.max(np.abs(var_tensor)))
            if (var_name not in self._quantized_threshold) or \
                (abs_max_value > self._quantized_threshold[var_name]):
                self._quantized_threshold[var_name] = abs_max_value

    def _sample_min_max(self):
        if self._quantized_var_min == {} and self._quantized_var_max == {}:
            for var_name in self._quantized_weight_var_name:
                var_tensor = _load_variable_data(self._scope, var_name)
                if self._weight_quantize_type == "abs_max":
                    min_value = float(np.min(var_tensor))
                    max_value = float(np.max(var_tensor))
                elif self._weight_quantize_type == "channel_wise_abs_max":
                    min_value = []
                    max_value = []
                    if self._weight_op_pairs[
                            var_name] in _channelwise_quant_axis1_ops:
                        for i in range(var_tensor.shape[1]):
                            min_value.append(float(np.min(var_tensor[:, i])))
                            max_value.append(float(np.max(var_tensor[:, i])))
                    else:
                        for i in range(var_tensor.shape[0]):
                            min_value.append(float(np.min(var_tensor[i])))
                            max_value.append(float(np.max(var_tensor[i])))
                self._quantized_var_min[var_name] = min_value
                self._quantized_var_max[var_name] = max_value

        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if (var_name not in self._quantized_var_min) or \
                (min_value < self._quantized_var_min[var_name]):
                self._quantized_var_min[var_name] = min_value
            if (var_name not in self._quantized_var_max) or \
                (max_value > self._quantized_var_max[var_name]):
                self._quantized_var_max[var_name] = max_value

    def _sample_histogram(self):
        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            var_tensor_abs = np.abs(var_tensor)
            bins = self._sampling_act_histogram[var_name][1]
            hist, _ = np.histogram(var_tensor_abs, bins=bins)
            self._sampling_act_histogram[var_name][0] += hist

    def _save_input_threhold(self):
        '''
        Save input threshold to the quantized op.
        '''
        assert self._algo == "min_max", \
            "The algo should be min_max to save input threshold."
        for op in self._program.global_block().ops:
            if op.type in self._quantizable_op_type:
                for var_name in _get_op_input_var_names(op):
                    assert var_name in self._quantized_var_min
                    assert var_name in self._quantized_var_max
                    op._set_attr(var_name + ".min",
                                 self._quantized_var_min[var_name])
                    op._set_attr(var_name + ".max",
                                 self._quantized_var_max[var_name])

    def _collect_activation_abs_min_max(self):
        '''
        Collect the abs_min and abs_max for all activations. When algo = KL
        or hist, get the min and max value, and then calculate the threshold.
        '''
        for var_name in self._quantized_act_var_name:
            var_tensor = _load_variable_data(self._scope, var_name)
            var_tensor = np.abs(var_tensor)
            min_value = float(np.min(var_tensor))
            max_value = float(np.max(var_tensor))
            if var_name not in self._sampling_act_abs_min_max:
                self._sampling_act_abs_min_max[
                    var_name] = [min_value, max_value]
            else:
                if min_value < self._sampling_act_abs_min_max[var_name][0]:
                    self._sampling_act_abs_min_max[var_name][0] = min_value
                if max_value > self._sampling_act_abs_min_max[var_name][1]:
                    self._sampling_act_abs_min_max[var_name][1] = max_value

    def _init_sampling_act_histogram(self):
        '''
        Based on the min/max value, init the sampling_act_histogram.
        '''
        for var_name in self._quantized_act_var_name:
            if var_name not in self._sampling_act_histogram:
                min_val = self._sampling_act_abs_min_max[var_name][0]
                max_val = self._sampling_act_abs_min_max[var_name][1]
                # np.histogram over empty data returns all-zero counts plus
                # the bin edges for the given range.
                hist, hist_edges = np.histogram(
                    [], bins=self._histogram_bins, range=(min_val, max_val))
                self._sampling_act_histogram[var_name] = [hist, hist_edges]

    def _calculate_kl_hist_threshold(self):
        '''
        Calculate the KL or hist threshold of quantized variables.
        '''
        _logger.info("Calculate {} threshold ...".format(self._algo))
        assert self._algo in ["KL", "hist"], "The algo should be KL or hist."

        # Abs_max threshold for weights
        for var_name in self._quantized_weight_var_name:
            weight_data = _load_variable_data(self._scope, var_name)
            if self._weight_quantize_type == "abs_max":
                weight_threshold = float(np.max(np.abs(weight_data)))
            elif self._weight_quantize_type == "channel_wise_abs_max":
                weight_threshold = []
                if self._weight_op_pairs[
                        var_name] in _channelwise_quant_axis1_ops:
                    for i in range(weight_data.shape[1]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[:, i]))))
                else:
                    for i in range(weight_data.shape[0]):
                        weight_threshold.append(
                            float(np.max(np.abs(weight_data[i]))))
            self._quantized_var_threshold[var_name] = weight_threshold

        for var_name in self._quantized_act_var_name:
            hist, hist_edges = self._sampling_act_histogram[var_name]
            if self._algo == "KL":
                bin_width = hist_edges[1] - hist_edges[0]
                self._quantized_var_threshold[var_name] = \
                    cal_kl_threshold(hist, bin_width, self._activation_bits)
            elif self._algo == "hist":
                self._quantized_var_threshold[var_name] = \
                    self._get_hist_scaling_factor(hist, hist_edges)

    def _update_program(self):
        '''
        Use QuantizationTransformPass and AddQuantDequantPass to insert 
        fake_quantize, fake_dequantize and fake_quant_dequant op. 
        Besides, save all thresholds to the scale var node.
        '''
        _logger.info("Update the program ...")
        graph = IrGraph(core.Graph(self._program.desc), for_test=True)

        # use QuantizationTransformPass to insert fake_quant/fake_dequantize op
        major_quantizable_op_types = []
        for op_type in QuantizationTransformPass._supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                major_quantizable_op_types.append(op_type)
        transform_pass = QuantizationTransformPass(
            scope=self._scope,
            place=self._place,
            weight_bits=self._weight_bits,
            activation_bits=self._activation_bits,
            activation_quantize_type=self._activation_quantize_type,
            weight_quantize_type=self._weight_quantize_type,
            quantizable_op_type=major_quantizable_op_types)
        transform_pass.apply(graph)

        # use AddQuantDequantPass to insert fake_quant_dequant op
        minor_quantizable_op_types = []
        for op_type in AddQuantDequantPass._supported_quantizable_op_type:
            if op_type in self._quantizable_op_type:
                minor_quantizable_op_types.append(op_type)
        add_quant_dequant_pass = AddQuantDequantPass(
            scope=self._scope,
            place=self._place,
            quantizable_op_type=minor_quantizable_op_types)
        add_quant_dequant_pass.apply(graph)

        # save threshold to scale var node
        if self._algo in ["KL", "hist"]:
            scale_dict = self._quantized_var_threshold
        else:
            scale_dict = self._quantized_threshold
        for key, val in scale_dict.items():
            _set_variable_data(
                self._scope,
                self._place,
                key + ".scale",
                np.array([val], dtype=np.float32))
            _set_variable_data(
                self._scope,
                self._place,
                key + ".quant_dequant.scale",
                np.array([val], dtype=np.float32))

        # apply QuantizationFreezePass, and obtain the final quant model
        freeze_pass = QuantizationFreezePass(
            scope=self._scope,
            place=self._place,
            bias_correction=self._bias_correction,
            weight_bits=self._weight_bits,
            activation_bits=self._activation_bits,
            weight_quantize_type=self._weight_quantize_type,
            quantizable_op_type=major_quantizable_op_types)
        freeze_pass.apply(graph)
        self._program = graph.to_program()

    def _save_output_threshold(self):
        '''
        Save output threshold to the quantized op.
        '''

        def save_info(op_node, out_var_name, threshold_map, out_info_name,
                      quantized_type):
            assert out_var_name in threshold_map, \
                "The output ({}) of {} node does not have threshold.".format(
                out_var_name, op_node.type)
            op_node._set_attr(out_info_name, threshold_map[out_var_name])
            if op_node.type in self._quantizable_op_type:
                op_node._set_attr("quantization_type", quantized_type)

        def analysis_and_save_info(op_node, out_var_name):
            argname_index = _get_output_name_index(op_node, out_var_name)
            assert argname_index is not None, \
                out_var_name + " is not the output of the op"
            if self._algo == "KL":
                # For compatibility, we save output threshold by two methods.
                save_info(op_node, out_var_name, self._quantized_var_threshold,
                          "out_threshold", "post_kl")
                save_info(
                    op_node, out_var_name, self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_kl")
            elif self._algo == "hist":
                # For compatibility, we save output threshold by two methods.
                save_info(op_node, out_var_name, self._quantized_var_threshold,
                          "out_threshold", "post_hist")
                save_info(
                    op_node, out_var_name, self._quantized_var_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_hist")

            elif self._algo in ["avg", "abs_max", "mse"]:
                save_info(op_node, out_var_name, self._quantized_threshold,
                          "out_threshold", "post_" + str(self._algo))
                save_info(
                    op_node, out_var_name, self._quantized_threshold,
                    argname_index[0] + str(argname_index[1]) + "_threshold",
                    "post_" + str(self._algo))
            elif self._algo == "min_max":
                save_info(op_node, out_var_name, self._quantized_var_min,
                          "out_min", "post_min_max")
                save_info(op_node, out_var_name, self._quantized_var_max,
                          "out_max", "post_min_max")

        for op in self._program.global_block().ops:
            if op.type in (self._quantizable_op_type + self._out_scale_op_list):
                out_var_names = _get_op_output_var_names(op)
                assert len(out_var_names) == 1, "Post training " + \
                    "quantization only support one output for " + op.type
                for var_name in out_var_names:
                    analysis_and_save_info(op, var_name)

    def _collect_dynamic_quantize_op_threshold(self, target_ops_type):
        """
        Collect and save the weight threshold for dynamic quantize ops,
        such as lstm and gru.
        Args:
            target_ops_type(list): the op type of target ops
        Returns:
            None
        """

        target_ops = []
        for index in range(self._program.num_blocks):
            for op in self._program.block(index).ops:
                if op.type in target_ops_type:
                    target_ops.append(op)

        quantization_type = str("post_" + self._algo).lower()
        persistable_var_names = _all_persistable_var_names(self._program)
        for op in target_ops:
            for var_name in _get_op_input_var_names(op):
                if var_name in persistable_var_names:
                    var_data = _load_variable_data(self._scope, var_name)
                    threshold = float(np.max(np.abs(var_data)))
                    argname, index = _get_input_name_index(op, var_name)
                    op._set_attr(argname + str(index) + "_threshold", threshold)
                    op._set_attr("quantization_type", quantization_type)
                    op._set_attr("bit_length", self._weight_bits)

    def _get_hist_scaling_factor(self, hist, hist_edges):
        '''
        Using the hist method to get the scaling factor.
        '''
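        # Accumulate the normalized histogram until the cumulative frequency
        # reaches hist_percent, then return the center of that bin as the
        # scaling factor.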
        threshold_rate = self._hist_percent
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return (hist_index - 0.5) * bin_width


class WeightQuantization(object):
    _supported_quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul']
    _supported_weight_quantize_type = ['channel_wise_abs_max', 'abs_max']

    def __init__(self, model_dir, model_filename=None, params_filename=None):
        '''
        This class quantizes the weight of some ops to reduce the size of the
        model or improve the performance.

        Args:
            model_dir(str): The path of the fp32 model that will be quantized,
                and the model and params files are under the path.
            model_filename(str, optional): The name of file to load the inference
                program. If it is None, the default filename '__model__' will
                be used. Default is 'None'.
            params_filename(str, optional): The name of file to load all parameters.
                When all parameters were saved in a single binary file, set it
                as the real filename. If parameters were saved in separate files,
                set it as 'None'. Default is 'None'.
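
        Examples:
        .. code-block:: python
            from paddle.fluid.contrib.slim.quantization import WeightQuantization
            # A minimal sketch; the paths below are placeholders.
            weight_quant = WeightQuantization(model_dir='path/to/fp32_model')
            weight_quant.quantize_weight_to_int(
                save_model_dir='path/to/save_model_path')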
        '''
        self._model_dir = model_dir
        self._model_filename = model_filename
        self._params_filename = params_filename

    def quantize_weight_to_int(self,
                               save_model_dir,
                               save_model_filename=None,
                               save_params_filename=None,
                               quantizable_op_type=["conv2d", "mul"],
                               weight_bits=8,
                               weight_quantize_type="channel_wise_abs_max",
                               generate_test_model=False,
                               threshold_rate=0.0):
        '''
        In order to reduce the size of the model, this api quantizes the weight
        of some ops from float32 to int8/16. In the inference stage, the 
        quantized weight will be dequantized to float32 again.
        
        Args:
            save_model_dir(str): The path to save the quantized model.
            save_model_filename(str, optional): The name of file to 
                save the inference program. If it is None, the default 
                filename '__model__' will be used. Default is 'None'.
            save_params_filename(str, optional): The name of file to 
                save all parameters. If it is None, parameters were 
                saved in separate files. If it is not None, all 
                parameters were saved in a single binary file.
            quantizable_op_type(list[str], optional): The list of ops 
                that will be quantized, and the quantized ops should be
                contained in ["conv2d", "depthwise_conv2d", "mul"]. 
                Default is ["conv2d","mul"].
            weight_bits(int, optional): The bits for the quantized weight, 
                and it should be 8 or 16. Default is 8.
            weight_quantize_type(str, optional): quantization type for weights,
                support 'channel_wise_abs_max' and 'abs_max'. Setting it as
                'channel_wise_abs_max' usually gives better accuracy.
            generate_test_model(bool, optional): If set generate_test_model 
                as True, it saves a fake quantized model, in which the weights 
                are quantized and dequantized. We can use PaddlePaddle to load 
                the fake quantized model and test the accuracy on GPU or CPU.
            threshold_rate(float, optional): This api uses abs_max method to
                quantize the weight from float32 to int8/16, and the abs max 
                value is important for quantization diff. When the abs_max 
                value is far away from the center of the numerical distribution, 
                we can set threshold_rate between 1e-6 and 1e-8, so the abs max 
                value will be optimized. Default is 0.0.
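
        Examples:
        .. code-block:: python
            # A minimal sketch; the paths below are placeholders.
            weight_quant = WeightQuantization(model_dir='path/to/fp32_model')
            weight_quant.quantize_weight_to_int(
                save_model_dir='path/to/save_model_path',
                quantizable_op_type=['conv2d', 'mul'],
                weight_bits=8,
                weight_quantize_type='channel_wise_abs_max')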
        '''
        for op_type in quantizable_op_type:
            assert op_type in self._supported_quantizable_op_type, \
                "Input error:" + op_type + \
                " is not supported for weight quantization."
        assert weight_bits in [8, 16], \
            "Input error: weight_bits should be 8 or 16."
        assert weight_quantize_type in self._supported_weight_quantize_type, \
            "Input error: weight_quantize_type should in {}".format(
                self._supported_weight_quantize_type)

        quantized_model_dir = os.path.join(save_model_dir, "quantized_model")
        self._quantize_weight_to_int(quantized_model_dir, save_model_filename,
                                     save_params_filename, quantizable_op_type,
                                     weight_bits, weight_quantize_type, False,
                                     threshold_rate)

        if generate_test_model:
            test_model_dir = os.path.join(save_model_dir, "test_model")
            self._quantize_weight_to_int(
                test_model_dir, save_model_filename, save_params_filename,
                quantizable_op_type, weight_bits, weight_quantize_type, True,
                threshold_rate)

    def convert_weight_to_fp16(self, save_model_dir):
        """
        Convert all persistable vars from fp32 to fp16.
        Note that, this api only changes the data type of variables in
        __params__ file, and the __model__ file remains unchanged. 

        Args:
            save_model_dir(str): The path to save the fp16 model.
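
        Examples:
        .. code-block:: python
            # A minimal sketch; the paths below are placeholders.
            weight_quant = WeightQuantization(model_dir='path/to/fp32_model')
            weight_quant.convert_weight_to_fp16('path/to/save_fp16_model')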
        """

        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [infer_program, feed_list, fetch_list] = \
            io.load_inference_model(dirname=self._model_dir,
                                    executor=exe,
                                    model_filename=self._model_filename,
                                    params_filename=self._params_filename)

        # Clone and save fp16 weights
        save_program = framework.Program()
        save_block = save_program.global_block()
        save_var_map = {}

        for var in infer_program.list_vars():
            if (var.type == core.VarDesc.VarType.RAW) or \
                (not var.persistable) or (var.name in ['feed', 'fetch']) \
                or (var.dtype != core.VarDesc.VarType.FP32):
                continue

            new_var = save_block._clone_variable(var)
            if self._params_filename is not None:
                save_var_map[new_var.name] = new_var
            else:
                save_file_path = os.path.join(
                    os.path.normpath(save_model_dir), new_var.name)
                save_block.append_op(
                    type='save',
                    inputs={'X': [new_var]},
                    outputs={},
                    attrs={
                        'file_path': os.path.normpath(save_file_path),
                        'save_as_fp16': True
                    })

        if self._params_filename is not None:
            save_var_list = []
            for name in sorted(save_var_map.keys()):
                save_var_list.append(save_var_map[name])

            saved_params_var = save_block.create_var(
                type=core.VarDesc.VarType.RAW,
                name=unique_name.generate("saved_params"))
            saved_params_var.desc.set_persistable(True)

            save_path = os.path.join(
                os.path.normpath(save_model_dir), self._params_filename)
            save_block.append_op(
                type='save_combine',
                inputs={'X': save_var_list},
                outputs={'Y': saved_params_var},
                attrs={'file_path': save_path,
                       'save_as_fp16': True})

        save_program._sync_with_cpp()
        exe.run(save_program)

        # Copy model
        model_filename = "__model__" if self._model_filename is None \
                    else self._model_filename
        src_model = os.path.join(self._model_dir, model_filename)
        dest_model = os.path.join(save_model_dir, model_filename)
        shutil.copyfile(src_model, dest_model)

    def _quantize_weight_to_int(self, save_model_dir, save_model_filename,
                                save_params_filename, quantizable_op_type,
                                weight_bits, weight_quantize_type, for_test,
                                threshold_rate):
        """
        Generate quantized model or fake quantized model.
        """
        # Load model
        place = core.CPUPlace()
        exe = Executor(place)
        scope = global_scope()
        [program, feed_list, fetch_list] = \
            io.load_inference_model(dirname=self._model_dir,
                                    executor=exe,
                                    model_filename=self._model_filename,
                                    params_filename=self._params_filename)

        quantized_ops = []
        for index in range(program.num_blocks):
            block = program.block(index)
            for op in block.ops:
                if op.type in quantizable_op_type:
                    quantized_ops.append(op)

        # Quantize weights
        persistable_var_names = _all_persistable_var_names(program)
        for op in quantized_ops:
            for var_name in op.input_arg_names:
                if var_name in persistable_var_names:
                    if weight_quantize_type == "abs_max":
                        self._weight_abs_max_quantization(
                            scope, place, weight_bits, threshold_rate, op,
                            var_name, for_test)
                    elif weight_quantize_type == "channel_wise_abs_max":
                        self._weight_channel_wise_abs_max_quantization(
                            scope, place, weight_bits, op, var_name, for_test)

        io.save_inference_model(
            dirname=save_model_dir,
            feeded_var_names=feed_list,
            target_vars=fetch_list,
            executor=exe,
            main_program=program,
            model_filename=save_model_filename,
            params_filename=save_params_filename)

    def _weight_abs_max_quantization(self, scope, place, weight_bits,
                                     threshold_rate, op, var_name, for_test):
        '''
        Use abs_max method to quantize weight.
        '''
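        # In brief, with b = weight_bits:
        #     scale = threshold / (2^(b-1) - 1)
        #     Q = round(W / scale), stored as int8 (b=8) or int16 (b=16)
        # where threshold is max(|W|), optionally tightened by clipping
        # outliers when threshold_rate is non-zero.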
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
        weight_data = _load_variable_data(scope, var_name)
        if abs(threshold_rate) < 1e-10:
            threshold_value = np.max(np.abs(weight_data))
        else:
            threshold_value = self._calculate_threshold(
                weight_data, threshold_rate)
            weight_data[weight_data > threshold_value] = threshold_value
            weight_data[weight_data < -threshold_value] = -threshold_value
        scale = threshold_value / quantize_range
        quantized_weight_data = \
            np.around(weight_data / scale).astype(save_weight_dtype)

        # Set weight data
        if not for_test:
            _set_variable_data(scope, place, var_name, quantized_weight_data)
        else:
            dequantized_weight_data = \
                (quantized_weight_data * scale).astype(np.float32)
            _set_variable_data(scope, place, var_name, dequantized_weight_data)

        # Save info
        op._set_attr('quantization_type', 'post_weight_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", [scale])  # Save as list

    def _weight_channel_wise_abs_max_quantization(
            self, scope, place, weight_bits, op, var_name, for_test):
        ''' 
        Use channel_wise_abs_max method to quantize weight.
        '''
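        # Per-channel quantization: conv2d/depthwise_conv2d weights get one
        # scale per output channel (axis 0), while mul weights get one scale
        # per output column (last axis).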
        quantize_range = (1 << (weight_bits - 1)) - 1
        save_weight_dtype = np.int8 if weight_bits == 8 else np.int16

        # Get quantized scale and weight data
        weight_data = _load_variable_data(scope, var_name)
        if op.type == "mul":
            scales, quantized_weight_data = \
                self._mul_channel_wise_quantization(weight_data,
                    quantize_range, save_weight_dtype)
        elif op.type in ["conv2d", "depthwise_conv2d"]:
            scales, quantized_weight_data = \
                self._conv_channel_wise_quantization(weight_data,
                    quantize_range, save_weight_dtype)
        else:
            raise ValueError(op.type +
                             " is not supported by weight quantization")

        # Set weight data
        if not for_test:
            _set_variable_data(scope, place, var_name, quantized_weight_data)
        else:
            if op.type == "mul":
                dequantized_weight_data = \
                    self._mul_channel_wise_dequantization(quantized_weight_data, scales)
            elif op.type in ["conv2d", "depthwise_conv2d"]:
                dequantized_weight_data = \
                    self._conv_channel_wise_dequantization(quantized_weight_data, scales)
            else:
                raise ValueError(op.type +
                                 " is not supported by weight quantization")
            _set_variable_data(scope, place, var_name, dequantized_weight_data)

        # Save info
        op._set_attr('quantization_type', 'post_weight_channel_wise_abs_max')
        op._set_attr('quantize_weight_bits', weight_bits)
        op._set_attr(var_name + "_quant_scale", scales)

    def _conv_channel_wise_quantization(self, weight_data, quantize_range,
                                        save_weight_dtype):
        '''
        Get channel wise scale for the weights of conv2d and depthwise_conv2d,
        and quantize the weights.
        '''
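        # Each output channel i carries its own scale, so one channel's
        # outliers do not inflate the quantization error of the others;
        # e.g. a conv weight of shape [64, 3, 3, 3] yields 64 scales.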
        scales = []
        quantized_weight_data = np.zeros_like(
            weight_data, dtype=save_weight_dtype)
        channel_num = weight_data.shape[0]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[i] = \
                np.around(weight_data[i] / scale).astype(save_weight_dtype)
        return scales, quantized_weight_data

    def _conv_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For conv2d and depthwise_conv2d, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(
            quantized_weight_data, dtype=np.float32)
        for i in range(len(scales)):
            dequantized_weight_data[i] = \
                (quantized_weight_data[i] * scales[i]).astype(np.float32)
        return dequantized_weight_data

    def _mul_channel_wise_quantization(self, weight_data, quantize_range,
                                       save_weight_dtype):
        '''
        Get channel wise scale for the weights of mul (one scale per output
        column), and quantize the weights.
        '''
        scales = []
        quantized_weight_data = np.zeros_like(
            weight_data, dtype=save_weight_dtype)
        channel_num = weight_data.shape[-1]
        for i in range(channel_num):
            scale = np.max(np.abs(weight_data[:, i])) / quantize_range
            scales.append(scale)
            quantized_weight_data[:, i] = \
                np.around(weight_data[:, i] / scale).astype(save_weight_dtype)
        return scales, quantized_weight_data

    def _mul_channel_wise_dequantization(self, quantized_weight_data, scales):
        '''
        For mul, dequantize the weights to fp32.
        '''
        dequantized_weight_data = np.zeros_like(
            quantized_weight_data, dtype=np.float32)
        for i in range(len(scales)):
            dequantized_weight_data[:, i] = \
                (quantized_weight_data[:, i] * scales[i]).astype(np.float32)
        return dequantized_weight_data

    def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000):
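        # Build a histogram of |input| and return the smallest value whose
        # cumulative probability mass reaches (1 - threshold_rate); callers
        # clip weights beyond this threshold before computing the scale.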
        input_abs = np.abs(input)
        hist, hist_edges = np.histogram(
            input_abs, bins=histogram_bins, range=(0, np.max(input_abs)))
        hist = hist / float(sum(hist))
        hist_sum = 0
        hist_index = 0
        for i in range(len(hist)):
            hist_sum += hist[i]
            if hist_sum >= 1.0 - threshold_rate:
                hist_index = i + 1
                break
        bin_width = hist_edges[1] - hist_edges[0]
        return hist_index * bin_width