#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

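# This script transforms a quantization-aware trained ("Quant") model into an
# optimized FP32 model or an optimized, quantized INT8 model for MKL-DNN
# (oneDNN) CPU inference.
#
# Example invocation (the paths are placeholders):
#   python save_quant_model.py \
#       --quant_model_path=/path/to/quant_model \
#       --int8_model_save_path=/path/to/int8_model
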
import unittest
import os
import sys
import argparse
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import Quant2Int8MkldnnPass
from paddle.fluid import core


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--quant_model_path',
        type=str,
        default='',
        help='A path to a Quant model.')
    parser.add_argument(
        '--fp32_model_save_path',
        type=str,
        default='',
        help='A path where the optimized FP32 model will be saved.')
    parser.add_argument(
        '--int8_model_save_path',
        type=str,
        default='',
        help='A path where the optimized and quantized INT8 model will be saved.')
    parser.add_argument(
        '--ops_to_quantize',
        type=str,
        default='',
        help='A comma-separated list of operators to quantize. Only '
        'quantizable operators are taken into account. If the option is not '
        'used, an attempt to quantize all quantizable operators will be made.')
    parser.add_argument(
        '--op_ids_to_skip',
        type=str,
        default='',
        help='A comma-separated list of operator IDs to skip in quantization.')
    parser.add_argument(
        '--debug',
        action='store_true',
        help='If set, the graph of the Quant model is drawn.')

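    # parse_known_args stores the recognized flags as attributes on the
    # `unittest` module object (used here as a plain namespace) and returns
    # the unrecognized arguments so they can be forwarded to the caller.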
    test_args, args = parser.parse_known_args(namespace=unittest)
    return test_args, sys.argv[:1] + args


def transform_and_save_model(original_path, save_path, save_type):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.executor.global_scope()
    with fluid.scope_guard(inference_scope):
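        # If the directory contains a '__model__' file, load with the default
        # filenames; otherwise assume a combined 'model' program file with a
        # single 'params' file.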
        if os.path.exists(os.path.join(original_path, '__model__')):
            [inference_program, feed_target_names,
             fetch_targets] = fluid.io.load_inference_model(original_path, exe)
        else:
            [inference_program, feed_target_names,
             fetch_targets] = fluid.io.load_inference_model(original_path, exe,
                                                            'model', 'params')

        ops_to_quantize = set()
        if len(test_args.ops_to_quantize) > 0:
            ops_to_quantize = set(test_args.ops_to_quantize.split(','))

        op_ids_to_skip = set([-1])
        if len(test_args.op_ids_to_skip) > 0:
            op_ids_to_skip = set(map(int, test_args.op_ids_to_skip.split(',')))

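        # Build an IR graph from the loaded program so the MKL-DNN pass can
        # rewrite it; optionally dump the original graph for debugging.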
        graph = IrGraph(core.Graph(inference_program.desc), for_test=True)
        if test_args.debug:
            graph.draw('.', 'quant_orig', graph.all_op_nodes())
        transform_to_mkldnn_int8_pass = Quant2Int8MkldnnPass(
            ops_to_quantize,
            _op_ids_to_skip=op_ids_to_skip,
            _scope=inference_scope,
            _place=place,
            _core=core,
            _debug=test_args.debug)

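        # Rebuild the graph before applying the pass; the debug draw above may
        # have altered the first one.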
        graph = IrGraph(core.Graph(inference_program.desc), for_test=True)
        if save_type == 'FP32':
            graph = transform_to_mkldnn_int8_pass.apply_fp32(graph)
        elif save_type == 'INT8':
            graph = transform_to_mkldnn_int8_pass.apply(graph)
        inference_program = graph.to_program()
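        # Save the transformed program in the standard inference-model format.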
        fluid.io.save_inference_model(save_path, feed_target_names,
                                      fetch_targets, exe, inference_program)
        print("Success! Transformed Quant_{0} model can be found at {1}\n".
              format(save_type, save_path))


if __name__ == '__main__':
    global test_args
    test_args, remaining_args = parse_args()
    if test_args.fp32_model_save_path:
        transform_and_save_model(test_args.quant_model_path,
                                 test_args.fp32_model_save_path, 'FP32')
    if test_args.int8_model_save_path:
        transform_and_save_model(test_args.quant_model_path,
                                 test_args.int8_model_save_path, 'INT8')