#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys
import copy
import numpy as np
import inspect
import shutil
from time import gmtime, strftime
import platform
import paddle
import itertools
import paddle.distributed.fleet as fleet
from ..quant.quanter import convert, quant_post
from ..common.recover_program import recover_inference_program
from ..common import get_logger
from ..common.patterns import get_patterns
from ..common.load_model import load_inference_model, get_model_dir, export_onnx
from ..common.dataloader import wrap_dataloader, get_feed_vars
from ..common.config_helper import load_config
from ..analysis import TableLatencyPredictor
from .create_compressed_program import build_distill_program, build_quant_program, build_prune_program, remove_unused_var_nodes
from .strategy_config import TrainConfig, ProgramInfo, merge_config
from .auto_strategy import prepare_strategy, get_final_quant_config, create_strategy_config, create_train_config
from .config_helpers import extract_strategy_config, extract_train_config
from .utils.predict import with_variable_shape

_logger = get_logger(__name__, level=logging.INFO)

try:
    if platform.system().lower() == 'linux':
        from ..quant import post_quant_hpo
except Exception as e:
    _logger.warning(e)


class AutoCompression:
    def __init__(self,
                 model_dir,
                 train_dataloader,
                 model_filename=None,
                 params_filename=None,
                 save_dir='./output',
                 config=None,
                 input_shapes=None,
                 target_speedup=None,
                 eval_callback=None,
                 eval_dataloader=None,
                 deploy_hardware='gpu'):
        """
        Compress inference model automatically.

        Args:
            model_dir(str): The path of the inference model to be compressed. The model
                and params files saved by ``paddle.static.save_inference_model`` are
                under this path.
            train_dataloader(Python Generator, Paddle.io.DataLoader): The Generator or
                Dataloader that provides the training data and returns a batch every
                time.
            model_filename(str): The name of the model file.
            params_filename(str): The name of the params file.
            save_dir(str): The path to save the compressed model. The models in this directory
                will be overwritten after calling the 'compress()' function.
            input_shapes(dict|tuple|list): It is used when the model has implicit dimensions other than
                the batch size. If it is a dict, the key is the name of an input and the value is its shape.
                For example, if the shape of input "X" is [-1, 3, -1, -1], which means the batch size, height
                and width are variable, then input_shapes can be set to {"X": [-1, 3, 512, 512]}.
                If it is a list or tuple, the model should have exactly one input, and its shape will be
                set to input_shapes. None means keeping the original shapes; in that case the search
                for compression strategies may be skipped. Default: None.
            config(dict|str, optional): The compression configuration. It can be a dict or the path of
                a YAML file, and may contain a ``TrainConfig`` key (see train_config below) as well as
                one or more strategy keys (see strategy_config below). Default: None.
            train_config(dict, optional): The train config used in the compression process; the keys can
                reference `<https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L103>`_ .
                Only one strategy (quant_post with hyperparameter optimization) allows train_config
                to be None. Default: None.
            strategy_config(dict, list(dict), optional): The strategy config. You can combine single configs to get a multi-strategy config, such as:
                1. set ``Quantization`` and ``Distillation`` to get a quant_aware and distillation compress config.
                    The Quantization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L24`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                2. set ``Quantization`` and ``HyperParameterOptimization`` to get a quant_post and hyperparameter optimization compress config.
                    The Quantization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L24`_ .
                    The HyperParameterOptimization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L73`_ .
                3. set ``ChannelPrune`` and ``Distillation`` to get a channel prune and distillation compress config.
                    The ChannelPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                4. set ``ASPPrune`` and ``Distillation`` to get an asp prune and distillation compress config.
                    The ASPPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                5. set ``TransformerPrune`` and ``Distillation`` to get a transformer prune and distillation compress config.
                    The TransformerPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                6. set ``UnstructurePrune`` and ``Distillation`` to get an unstructure prune and distillation compress config.
                    The UnstructurePrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L91`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                7. set ``Distillation`` to use one teacher model to distill the student model.
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                8. set ``MultiTeacherDistillation`` to use multiple teachers to distill the student model.
                    The MultiTeacherDistillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L56`_ .

                If set to None, a strategy will be chosen automatically. Default: None.
            target_speedup(float, optional): The target speedup ratio of the auto compression. Default: None.
            eval_callback(function, optional): An eval function defined by yourself that returns the metric of
                the inference program; it can be used to judge the metric of the compressed model. The documentation
                on how to write an eval function is `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/static/auto-compression/custom_function.rst`_ .
                ``eval_callback`` and ``eval_dataloader`` cannot both be None at the same time. Default: None.
            eval_dataloader(paddle.io.Dataloader, optional): The Generator or Dataloader that provides eval data and
                returns a batch every time. If eval_dataloader is None, the first 5000 samples of train_dataloader
                are taken as eval_dataloader, and the resulting metric is for reference only. Default: None.
            deploy_hardware(str, optional): The hardware you want to deploy. Default: 'gpu'.
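
        Examples:
            A minimal usage sketch (illustrative only; the dataset, dataloader and the
            YAML path below are placeholders and are not part of this module):

            .. code-block:: python

                import paddle
                from paddleslim.auto_compression import AutoCompression

                # Any iterable/DataLoader that yields feed dicts for the model inputs.
                train_loader = paddle.io.DataLoader(train_dataset, batch_size=32)

                ac = AutoCompression(
                    model_dir='./inference_model',
                    model_filename='model.pdmodel',
                    params_filename='model.pdiparams',
                    save_dir='./output',
                    config='./act_config.yaml',  # or a dict with TrainConfig/strategy keys
                    train_dataloader=train_loader)
                ac.compress()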
        """
G
Guanghua Yu 已提交
121
        self.model_dir = model_dir.rstrip('/')
        self.updated_model_dir, self.model_filename, self.params_filename = get_model_dir(
            model_dir, model_filename, params_filename)

        self.final_dir = save_dir
        if not os.path.exists(self.final_dir):
            os.makedirs(self.final_dir)

        # load config
        if isinstance(config, str):
            config = load_config(config)
            self.train_config = extract_train_config(config)
        elif isinstance(config, dict):
            if 'TrainConfig' in config:
                self.train_config = TrainConfig(**config.pop('TrainConfig'))
            else:
                self.train_config = None
        else:
            self.train_config = None
        self.strategy_config = extract_strategy_config(config)

        # prepare dataloader
        self.feed_vars = get_feed_vars(self.model_dir, model_filename,
                                       params_filename)
        self.train_dataloader = wrap_dataloader(train_dataloader,
                                                self.feed_vars)
        self.eval_dataloader = wrap_dataloader(eval_dataloader, self.feed_vars)
        if self.eval_dataloader is None:
            self.eval_dataloader = self._get_eval_dataloader(
                self.train_dataloader)

        self.target_speedup = target_speedup
        self.eval_function = eval_callback
        self.deploy_hardware = deploy_hardware

        paddle.enable_static()
        self._exe, self._places = self._prepare_envs()
        self.model_type = self._get_model_type()

        if self.train_config is not None and self.train_config.use_fleet:
            fleet.init(is_collective=True)

        if with_variable_shape(
                self.model_dir,
                model_filename=model_filename,
                params_filename=params_filename) and input_shapes is not None:

            infer_shape_model = self.create_tmp_dir(
                self.final_dir, prefix="infer_shape_model_")
            self._infer_shape(self.model_dir, self.model_filename,
                              self.params_filename, input_shapes,
                              infer_shape_model)
            self.model_dir = infer_shape_model
            self.model_filename = "infered_shape.pdmodel"
            self.params_filename = "infered_shape.pdiparams"

        if self.strategy_config is None:
            strategy_config = prepare_strategy(
                self._exe, self._places, self.model_dir, self.model_filename,
                self.params_filename, self.target_speedup, self.deploy_hardware,
                self.model_type)
            self.strategy_config = strategy_config
        elif isinstance(self.strategy_config, dict):
            self.strategy_config = [self.strategy_config]
        elif isinstance(self.strategy_config, str):
            self.strategy_config = create_strategy_config(
                self.strategy_config, self.model_type)

        self._strategy, self._config = self._prepare_strategy(
            self.strategy_config)

        self.train_config = self._get_final_train_config(
            self.train_config, self._strategy, self.model_type)
        _logger.info(f"Selected strategies: {self._strategy}")

    def _get_final_train_config(self, train_config, strategy_config,
                                model_type):
        # If train_config is None, set default train_config
        if train_config is None:
            train_config = create_train_config(strategy_config, model_type)

        train_configs = [train_config]
        for idx in range(1, len(self._strategy)):
            if 'qat' in self._strategy[idx] or 'ptq' in self._strategy[idx]:
                ### If there is more than one compress strategy, the TrainConfig in the yaml is only used for pruning.
                ### The TrainConfig for quantization is derived from it as below.
                tmp_train_config = copy.deepcopy(train_config.__dict__)
                ### the epochs, train_iter and learning rate of the quant stage are 10% of those of the prune stage
                if self.model_type != 'transformer':
                    tmp_train_config['epochs'] = max(
                        int(train_config.epochs * 0.1), 1)
                if train_config.train_iter is not None:
                    tmp_train_config['train_iter'] = int(
                        train_config.train_iter * 0.1)
                if isinstance(train_config.learning_rate, float):
                    tmp_train_config[
                        'learning_rate'] = train_config.learning_rate * 0.1
                else:
                    if 'learning_rate' in train_config.learning_rate:
                        tmp_train_config['learning_rate'][
                            'learning_rate'] = train_config.learning_rate[
                                'learning_rate'] * 0.1
                    else:  ### learning rate decay is PiecewiseDecay
                        tmp_train_config['learning_rate']['values'] = list(
                            map(lambda x: x * 0.1, train_config.learning_rate[
                                'values']))
                train_cfg = TrainConfig(**tmp_train_config)
            else:
                tmp_train_config = copy.deepcopy(train_config.__dict__)
                train_cfg = TrainConfig(**tmp_train_config)

            train_configs.append(train_cfg)
        return train_configs

    def _infer_shape(self, model_dir, model_filename, params_filename,
                     input_shapes, save_path):
        assert type(input_shapes) in [
            dict, list, tuple
        ], f'Type of input_shapes should be in [dict, tuple or list] but got {type(input_shapes)}.'
        paddle.enable_static()
        exe = paddle.static.Executor(paddle.CPUPlace())
        [inference_program, feed_target_names,
         fetch_targets] = load_inference_model(model_dir, exe, model_filename,
                                               params_filename)

        if type(input_shapes) in [list, tuple]:
            assert len(
                feed_target_names
            ) == 1, f"The number of model's inputs should be 1 but got {feed_target_names}."
            input_shapes = {feed_target_names[0]: input_shapes}

        feed_vars = []
        for var_ in inference_program.list_vars():
            if var_.name in feed_target_names:
                feed_vars.append(var_)
                var_.desc.set_shape(input_shapes[var_.name])

        for block in inference_program.blocks:
            for op in block.ops:
                if op.type not in ["feed", "fetch"]:
                    op.desc.infer_shape(block.desc)

        save_path = os.path.join(save_path, "infered_shape")
        os.makedirs(save_path)
        paddle.static.save_inference_model(
            save_path,
            feed_vars,
            fetch_targets,
            exe,
            program=inference_program,
            clip_extra=False)
        _logger.info(f"Saved the model with inferred shapes to {save_path}")

    @property
    def deploy_hardware(self):
        return self._deploy_hardware

    @deploy_hardware.setter
    def deploy_hardware(self, value):
        supported_hardware = TableLatencyPredictor.hardware_list + [
            'gpu',  # nvidia gpu
            "cpu",  # intel cpu
        ]
        if value is not None:
            # Fail-fast when deploy hardware is set explicitly
            assert (
                value in supported_hardware
            ), f"Hardware should be in supported list {supported_hardware} but got {value}. Or you can set deploy_hardware None."
        self._deploy_hardware = value

    def _get_eval_dataloader(self, train_dataloader):
        def _gen():
            len_loader = len(list(train_dataloader()))
            ### at most 5000 items are taken from train_dataloader when it is used as eval_dataloader
            slice_len = min(5000, len_loader)
            ret = list(itertools.islice(train_dataloader(), slice_len))
            for i in ret:
                yield i

        return _gen

    def _prepare_envs(self):
        devices = paddle.device.get_device().split(':')[0]
        places = paddle.device._convert_to_place(devices)
        _logger.info(f"devices: {devices}")
        exe = paddle.static.Executor(places)
        return exe, places

    def _get_model_type(self):
        [inference_program, _, _] = (load_inference_model(
            self.model_dir,
            model_filename=self.model_filename,
            params_filename=self.params_filename,
            executor=self._exe))
        _, _, model_type = get_patterns(inference_program)
        if self.model_filename is None:
            opt_model_filename = '__opt_model__'
        else:
            opt_model_filename = 'opt_' + self.model_filename
        program_bytes = inference_program._remove_training_info(
            clip_extra=False).desc.serialize_to_string()
        with open(
                os.path.join(self.updated_model_dir, opt_model_filename),
                "wb") as f:
            f.write(program_bytes)
        shutil.move(
            os.path.join(self.updated_model_dir, opt_model_filename),
            os.path.join(self.updated_model_dir, self.model_filename))
        _logger.info(f"Detected model type: {model_type}")
        return model_type

    def _prepare_strategy(self, strategy_config):
        if not isinstance(strategy_config, list):
            strategy_config = [strategy_config]

        strategy = []
        config = []
        for strategy_c in strategy_config:
            quant_config = strategy_c.get("Quantization", None)
            hpo_config = strategy_c.get("HyperParameterOptimization", None)
            prune_config = strategy_c.get("ChannelPrune", None)
            asp_config = strategy_c.get("ASPPrune", None)
            transformer_prune_config = strategy_c.get("TransformerPrune", None)
            unstructure_prune_config = strategy_c.get("UnstructurePrune", None)
            single_teacher_distill_config = strategy_c.get("Distillation", None)
            if single_teacher_distill_config is not None and single_teacher_distill_config.teacher_model_dir is None:
                single_teacher_distill_config.teacher_model_dir = self.model_dir
                single_teacher_distill_config.teacher_model_filename = self.model_filename
                single_teacher_distill_config.teacher_params_filename = self.params_filename

            multi_teacher_distill_config = strategy_c.get(
                "MultiTeacherDistillation", None)

            assert (single_teacher_distill_config is None) or (multi_teacher_distill_config is None), \
                "Distillation and MultiTeacherDistillation cannot be set at the same time."
            self._distill_config = single_teacher_distill_config if \
                   single_teacher_distill_config is not None else \
                   multi_teacher_distill_config

            only_distillation = True

            ### case1: prune_config & distill config
            if prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('channel_prune_dis')
                config.append(merge_config(prune_config, self._distill_config))

            ### case2: asp_config & distill config
            if asp_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('asp_prune_dis')
                config.append(merge_config(asp_config, self._distill_config))

            ### case3: transformer_prune_config & distill config
            if transformer_prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('transformer_prune_dis')
                config.append(
                    merge_config(transformer_prune_config,
                                 self._distill_config))

            ### case4: unstructure_config & distill config
            if unstructure_prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('unstructure_prune_dis')
                config.append(
                    merge_config(unstructure_prune_config,
                                 self._distill_config))

            ### case5: quant_config & hpo_config ==> PTQ & HPO
            if quant_config is not None and hpo_config is not None:
                only_distillation = False
                strategy.append('ptq_hpo')
                config.append(merge_config(quant_config, hpo_config))

            ### case6: quant_config & distill config ==> QAT & Distill
            if quant_config is not None and self._distill_config is not None and 'ptq_hpo' not in strategy:
                only_distillation = False
                strategy.append('qat_dis')
                config.append(merge_config(quant_config, self._distill_config))

            ### case7: distill_config
            if only_distillation and self._distill_config is not None:
                if single_teacher_distill_config is not None:
                    strategy.append('single_teacher_dis')
                    config.append(single_teacher_distill_config)
                else:
                    strategy.append('multi_teacher_dis')
                    config.append(multi_teacher_distill_config)

        ### NOTE: keep quantization as the last step
        idx = -1
        if 'qat_dis' in strategy and strategy.index('qat_dis') != (
                len(strategy) - 1):
            idx = strategy.index('qat_dis')
        elif 'ptq_hpo' in strategy and strategy.index('ptq_hpo') != (
                len(strategy) - 1):
            idx = strategy.index('ptq_hpo')

        if idx != -1:
            strategy = strategy[:idx] + strategy[idx + 1:] + [strategy[idx]]
            config = config[:idx] + config[idx + 1:] + [config[idx]]

        return strategy, config

    def _prepare_fleet_strategy(self, train_config):
        build_strategy = paddle.static.BuildStrategy()
        exec_strategy = paddle.static.ExecutionStrategy()

        strategy = fleet.DistributedStrategy()
        strategy.build_strategy = build_strategy
        if train_config.recompute_config is not None:
            strategy.recompute = True
            strategy.recompute_configs = { ** train_config.recompute_config}
        if train_config.sharding_config is not None:
            strategy.sharding = True
            strategy.sharding_configs = { ** train_config.sharding_config}
        if train_config.amp_config is not None:
            strategy.amp = True
            strategy.amp_configs = { ** train_config.amp_config}
        if train_config.asp_config is not None:
            strategy.asp = True
        return strategy

    def _prepare_program(self, program, feed_target_names, fetch_targets,
                         patterns, default_distill_node_pair, strategy, config,
                         train_config):
        train_program = recover_inference_program(program)
        startup_program = paddle.static.Program()
        train_program_info = ProgramInfo(startup_program, train_program,
                                         feed_target_names, fetch_targets)

        config_dict = config.__dict__
        if "prune_strategy" in config_dict and config_dict[
                "prune_strategy"] == "gmp" and config_dict[
                    'gmp_config'] is None:
            _logger.info(
                "Calculating the iterations per epoch... (It will take some time)")
            # NOTE:XXX: This way of calculating the iters needs to be improved.
            if train_config.epochs:
                iters_per_epoch = len(list(self.train_dataloader()))
                total_iters = train_config.epochs * iters_per_epoch
            elif train_config.train_iter:
                total_iters = train_config.train_iter
            else:
                raise RuntimeError(
                    'train_config must have an `epochs` or `train_iter` field.')
            config_dict['gmp_config'] = {
                'stable_iterations': 0,
                'pruning_iterations': max(0.45 * total_iters, 30),
                'tunning_iterations': max(0.45 * total_iters, 30),
                'resume_iteration': -1,
                'pruning_steps': 100 if (0.45 * total_iters) > 1000 else 1,
                'initial_ratio': 0.15,
            }
        ### add prune program
        self._pruner = None
        if 'prune' in strategy:
            self._pruner, train_program_info = build_prune_program(
                self._exe, self._places, config_dict, train_program_info,
                strategy, patterns, self.eval_dataloader)

        if train_config.use_fleet:
            dist_strategy = self._prepare_fleet_strategy(train_config)
        else:
            dist_strategy = None

        ### add distill program
        if 'dis' in strategy:
            train_program_info, test_program_info = build_distill_program(
                self._exe,
                self._places,
                config_dict,
                train_config.__dict__,
                train_program_info,
                pruner=self._pruner,
                dist_strategy=dist_strategy,
                default_distill_node_pair=default_distill_node_pair)

        self._quant_config = None
        ### add quant_aware program; quantization is always the last step
        if 'qat' in strategy:
            train_program_info, test_program_info, self._quant_config = build_quant_program(
                self._exe, self._places, config_dict, train_program_info,
                test_program_info)
        if train_config.sparse_model:
            from ..prune.unstructured_pruner import UnstructuredPruner
            # NOTE: The initialization parameters of this pruner don't take effect; it is only used to call the 'set_static_masks' function
            self._pruner = UnstructuredPruner(
                train_program_info.program,
                mode='ratio',
                ratio=0.75,
                prune_params_type='conv1x1_only',
                place=self._places)
            self._pruner.set_static_masks()  # Fixed model sparsity

        self._exe.run(train_program_info.startup_program)

        if (not train_config.use_fleet) and train_config.amp_config is not None:
            if hasattr(
                    train_config.amp_config,
                    'use_pure_fp16') and train_config.amp_config.use_pure_fp16:
                train_program_info.optimizer.amp_init(
                    self._places, scope=paddle.static.global_scope())

        if 'asp' in strategy:
            ### prune weight in scope
            self._pruner.prune_model(train_program_info.program)

        if not train_config.use_fleet:
            train_program_info = self._compiled_program(train_program_info,
                                                        strategy)
            test_program_info = self._compiled_program(test_program_info,
                                                       self._strategy)
        return train_program_info, test_program_info

    def _compiled_program(self, program_info, strategy):
        compiled_prog = paddle.static.CompiledProgram(program_info.program)
        build_strategy = paddle.static.BuildStrategy()
        exec_strategy = paddle.static.ExecutionStrategy()
        if 'qat' in strategy:
            build_strategy.memory_optimize = False
            build_strategy.enable_inplace = False
            build_strategy.fuse_all_reduce_ops = False
            build_strategy.sync_batch_norm = False

        compiled_prog = compiled_prog.with_data_parallel(
            loss_name=program_info.fetch_targets[0].name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
        program_info.program = compiled_prog
        return program_info

    def create_tmp_dir(self, base_dir, prefix="tmp"):
        # create a new temp directory in final dir
        s_datetime = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
        tmp_base_name = "_".join([prefix, str(os.getpid()), s_datetime])
        tmp_dir = os.path.join(base_dir, tmp_base_name)
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
        return tmp_dir

    def compress(self):
        assert len(self._strategy) > 0
        self.tmp_dir = self.create_tmp_dir(self.final_dir)
        strategy = None
        config = None
        train_config = None
        strategy_idx = None
        for strategy_idx, (
                strategy, config, train_config
        ) in enumerate(zip(self._strategy, self._config, self.train_config)):
            self.single_strategy_compress(strategy, config, strategy_idx,
                                          train_config)

        if strategy == 'ptq_hpo' and config.max_quant_count == 1 and platform.system(
        ).lower() == 'linux':
            ptq_loss = post_quant_hpo.g_min_emd_loss

            final_quant_config = get_final_quant_config(ptq_loss,
                                                        self.model_type)
            if final_quant_config is not None:
                quant_strategy, quant_config = self._prepare_strategy(
                    final_quant_config)
                self.single_strategy_compress(quant_strategy[0],
                                              quant_config[0], strategy_idx,
                                              train_config)
        tmp_model_path = os.path.join(
            self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1)))
        final_model_path = os.path.join(self.final_dir)
        if paddle.distributed.get_rank() == 0:
            for _file in os.listdir(tmp_model_path):
                _file_path = os.path.join(tmp_model_path, _file)
                if os.path.isfile(_file_path):
                    shutil.copy(_file_path, final_model_path)
            shutil.rmtree(self.tmp_dir)
            _logger.info(
                "==> The ACT compression has been completed and the final model is saved in `{}`".
                format(final_model_path))

    def single_strategy_compress(self, strategy, config, strategy_idx,
                                 train_config):
        # start compress, including train/eval model
        # TODO: add the emd loss of evaluation model.
        if strategy_idx == 0:
            model_dir = self.model_dir
        else:
            model_dir = os.path.join(self.tmp_dir,
                                     'strategy_{}'.format(str(strategy_idx)))

        if self.updated_model_dir != model_dir:
            # If model is ONNX, convert it to inference model firstly.
            load_inference_model(
                model_dir,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                executor=self._exe)
        if strategy == 'quant_post':
            quant_post(
                self._exe,
                model_dir=model_dir,
                quantize_model_path=os.path.join(
                    self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1))),
                data_loader=self.train_dataloader,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                save_model_filename=self.model_filename,
                save_params_filename=self.params_filename,
                batch_size=1,
                batch_nums=config.batch_num,
                algo=config.ptq_algo,
                round_type='round',
                bias_correct=config.bias_correct,
                hist_percent=config.hist_percent,
                quantizable_op_type=config.quantize_op_types,
                is_full_quantize=config.is_full_quantize,
                weight_bits=config.weight_bits,
                activation_bits=config.activation_bits,
                activation_quantize_type='range_abs_max',
                weight_quantize_type=config.weight_quantize_type,
                onnx_format=False)

        elif strategy == 'ptq_hpo':
            if platform.system().lower() != 'linux':
                raise NotImplementedError(
                    "post-quant-hpo is not supported on systems other than Linux")
            if self.updated_model_dir != model_dir:
                # If model is ONNX, convert it to inference model firstly.
                load_inference_model(
                    model_dir,
                    model_filename=self.model_filename,
                    params_filename=self.params_filename,
                    executor=self._exe)
            if self.eval_function is None:
                # If the eval function is None, ptq_hpo uses the EMD distance to evaluate the quantized model, so a dataloader without labels is needed
                eval_dataloader = self.train_dataloader
            else:
                eval_dataloader = self.eval_dataloader
            post_quant_hpo.quant_post_hpo(
                self._exe,
                self._places,
                model_dir=model_dir,
                quantize_model_path=os.path.join(
                    self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1))),
                train_dataloader=self.train_dataloader,
                eval_dataloader=eval_dataloader,
                eval_function=self.eval_function,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                save_model_filename=self.model_filename,
                save_params_filename=self.params_filename,
                quantizable_op_type=config.quantize_op_types,
                weight_bits=config.weight_bits,
                activation_bits=config.activation_bits,
                weight_quantize_type=config.weight_quantize_type,
                is_full_quantize=config.is_full_quantize,
                algo=config.ptq_algo,
                bias_correct=config.bias_correct,
                hist_percent=config.hist_percent,
                batch_size=[1],
                batch_num=config.batch_num,
                onnx_format=config.onnx_format,
                runcount_limit=config.max_quant_count)

        else:
            assert 'dis' in strategy, "Only optimizing the compressed model with a distillation loss is supported."

            [inference_program, feed_target_names, fetch_targets]= load_inference_model( \
                model_dir, \
                model_filename=self.model_filename, params_filename=self.params_filename,
                executor=self._exe)

            ### used to check whether the dataloader is right
            self.metric_before_compressed = None
            if self.eval_function is not None and train_config.origin_metric is not None:
                _logger.info("start to test the metric before compression")
                metric = self.eval_function(self._exe, inference_program,
                                            feed_target_names, fetch_targets)
                _logger.info("metric of the model before compression is: {}".format(metric))
                buf = 0.05
                if metric < (float(train_config.origin_metric) - buf) or \
                        metric > (float(train_config.origin_metric) + buf):
                    raise RuntimeError("target metric of pretrained model is {}, \
                          but now is {}, Please check the format of evaluation dataset \
                          or check the origin_metric in train_config"
                                                                     .format(\
                          train_config.origin_metric, metric))
                self.metric_before_compressed = metric

            patterns, default_distill_node_pair, _ = get_patterns(
                inference_program)

            train_program_info, test_program_info = self._prepare_program(
                inference_program, feed_target_names, fetch_targets, patterns,
                default_distill_node_pair, strategy, config, train_config)
            if 'unstructure' in self._strategy:
                test_program_info.program._program = remove_unused_var_nodes(
                    test_program_info.program._program)
            test_program_info = self._start_train(
                train_program_info, test_program_info, strategy, train_config)
            self._save_model(test_program_info, strategy, strategy_idx)

    def _start_train(self, train_program_info, test_program_info, strategy,
                     train_config):
        best_metric = -1.0
        total_epochs = train_config.epochs if train_config.epochs else 100
        total_train_iter = 0
        stop_training = False
        for epoch_id in range(total_epochs):
            if stop_training:
                break
            for batch_id, data in enumerate(self.train_dataloader()):
                np_probs_float, = self._exe.run(train_program_info.program, \
                    feed=data, \
                    fetch_list=train_program_info.fetch_targets)
                if not isinstance(train_program_info.learning_rate, float):
                    train_program_info.learning_rate.step()
                if 'unstructure' in strategy:
                    self._pruner.step()

                if train_config.logging_iter is None:
                    logging_iter = 10
                else:
                    logging_iter = train_config.logging_iter
                if batch_id % int(logging_iter) == 0:
                    _logger.info(
                        "Total iter: {}, epoch: {}, batch: {}, loss: {}".format(
                            total_train_iter, epoch_id, batch_id,
                            np_probs_float))
                total_train_iter += 1
                if total_train_iter % int(
                        train_config.eval_iter) == 0 and total_train_iter != 0:
                    if self.eval_function is not None:

                        # GMP pruner step 3: update params before summarizing sparsity, saving the model or evaluating.
                        if 'unstructure' in strategy:
                            self._pruner.update_params()

                        metric = self.eval_function(
                            self._exe, test_program_info.program,
                            test_program_info.feed_target_names,
                            test_program_info.fetch_targets)

                        if metric > best_metric:
                            paddle.static.save(
                                program=test_program_info.program._program,
                                model_path=os.path.join(self.tmp_dir,
                                                        'best_model'))
                            best_metric = metric
                            _logger.info(
                                "epoch: {} metric of compressed model is: {:.6f}, best metric of compressed model is {:.6f}".
                                format(epoch_id, metric, best_metric))
                            if self.metric_before_compressed is not None and float(
                                    abs(best_metric -
                                        self.metric_before_compressed)
                            ) / self.metric_before_compressed <= 0.005:
                                _logger.info(
                                    "The relative error between the compressed model and the original model is less than 0.5%. The training process ends."
                                )
                                stop_training = True
                                break
                        else:
                            _logger.info(
                                "epoch: {} metric of compressed model is: {:.6f}, best metric of compressed model is {:.6f}".
                                format(epoch_id, metric, best_metric))
                        if train_config.target_metric is not None:
                            if metric > float(train_config.target_metric):
                                stop_training = True
                                _logger.info(
                                    "The metric of compressed model has reached the target metric. The training process ends."
                                )
                                break

                    else:
                        _logger.warning(
                            "No eval function is set, so the accuracy of the compressed model cannot be tested."
                        )
                if (train_config.train_iter and total_train_iter >=
                        train_config.train_iter) or stop_training:
                    break

        if 'unstructure' in self._strategy or train_config.sparse_model:
            self._pruner.update_params()

        return test_program_info

    def _save_model(self, test_program_info, strategy, strategy_idx):
        test_program = test_program_info.program._program if isinstance(
            test_program_info.program,
            paddle.static.CompiledProgram) else test_program_info.program

        if os.path.exists(os.path.join(self.tmp_dir, 'best_model.pdparams')):
            paddle.static.load(test_program,
                               os.path.join(self.tmp_dir, 'best_model'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdmodel'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdopt'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdparams'))

        model_dir = os.path.join(self.tmp_dir,
                                 'strategy_{}'.format(str(strategy_idx + 1)))
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        if 'qat' in strategy:
            test_program = convert(
                test_program,
                self._places,
                self._quant_config,
                scope=paddle.static.global_scope(),
                save_clip_ranges_path=self.final_dir)

        feed_vars = [
            test_program.global_block().var(name)
            for name in test_program_info.feed_target_names
        ]

        model_name = None
        if self.model_filename is None:
            model_name = "model"
        elif self.model_filename.endswith(".pdmodel"):
            model_name = self.model_filename.rsplit(".", 1)[0]
        else:
            model_name = self.model_filename

        path_prefix = os.path.join(model_dir, model_name)
        paddle.static.save_inference_model(
            path_prefix=path_prefix,
            feed_vars=feed_vars,
            fetch_vars=test_program_info.fetch_targets,
            executor=self._exe,
            program=test_program,
            clip_extra=False)

    def export_onnx(self,
                    model_name='quant_model.onnx',
                    deploy_backend='tensorrt'):
        infer_model_path = os.path.join(self.final_dir, self.model_filename)
        assert os.path.exists(
            infer_model_path), 'Not found {}, please check it.'.format(
                infer_model_path)
        onnx_save_path = os.path.join(self.final_dir, 'ONNX')
        if not os.path.exists(onnx_save_path):
            os.makedirs(onnx_save_path)
        export_onnx(
            self.final_dir,
            model_filename=self.model_filename,
            params_filename=self.params_filename,
            save_file_path=os.path.join(onnx_save_path, model_name),
            deploy_backend=deploy_backend)
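
# Note: `export_onnx` reads the compressed model from `self.final_dir` (the `save_dir`
# passed to the constructor), so it should only be called after `compress()` has
# finished. A minimal sketch, assuming `ac` is an AutoCompression instance that has
# already run `ac.compress()`:
#
#     ac.export_onnx(model_name='quant_model.onnx', deploy_backend='tensorrt')
#     # The ONNX file is written to <save_dir>/ONNX/quant_model.onnx.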