#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys
import copy
import numpy as np
import inspect
import shutil
from time import gmtime, strftime
import platform
import paddle
import itertools
import paddle.distributed.fleet as fleet
from ..quant.quanter import convert, quant_post
from ..common.recover_program import recover_inference_program
from ..common import get_logger
from ..common.patterns import get_patterns
from ..analysis import TableLatencyPredictor
from .create_compressed_program import build_distill_program, build_quant_program, build_prune_program, remove_unused_var_nodes
from .strategy_config import TrainConfig, ProgramInfo, merge_config
from .auto_strategy import prepare_strategy, get_final_quant_config, create_strategy_config, create_train_config
from .config_helpers import load_config, extract_strategy_config, extract_train_config
from .utils.predict import with_variable_shape
from .utils import get_feed_vars, wrap_dataloader, load_inference_model, get_model_dir

_logger = get_logger(__name__, level=logging.INFO)

try:
    if platform.system().lower() == 'linux':
        from ..quant import post_quant_hpo
except Exception as e:
    _logger.warning(e)


class AutoCompression:
    def __init__(self,
                 model_dir,
                 train_dataloader,
                 model_filename=None,
                 params_filename=None,
                 save_dir='./output',
                 config=None,
                 input_shapes=None,
                 target_speedup=None,
                 eval_callback=None,
                 eval_dataloader=None,
                 deploy_hardware='gpu'):
        """
        Compress inference model automatically.

        Args:
            model_dir(str): The path of the inference model to be compressed; the model
                and params saved by ``paddle.static.save_inference_model`` are under this path.
            train_dataloader(Python Generator, paddle.io.DataLoader): The
                generator or dataloader that provides train data and returns
                a batch every time.
            model_filename(str): The name of the model file.
            params_filename(str): The name of the params file.
            save_dir(str): The path to save the compressed model. The models in this directory will be overwritten
                after calling the 'compress()' function.
            input_shapes(dict|tuple|list): Used when the model has implicit dimensions other than the batch size.
                If it is a dict, the key is the name of an input and the value is its shape.
                Given an input "X" whose shape is [-1, 3, -1, -1], which means the batch size, height
                and width are variable, input_shapes can be set to {"X": [-1, 3, 512, 512]}.
                If it is a list or tuple, the model must have exactly one input, whose shape
                is set to input_shapes. None means keeping the original shapes; in that case
                the search for compression strategies may be skipped. Default: None.
            train_config(dict, optional): The train config used in the compression process (the ``TrainConfig``
                section of ``config``); the keys can
                reference `<https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L103>`_ .
                Only one strategy (quant_post with hyperparameter optimization) allows train_config
                to be None. Default: None.
            strategy_config(dict, list(dict), optional): The strategy config (the strategy sections of ``config``).
                You can combine single configs to get a multi-strategy config, for example:
                1. set ``Quantization`` and ``Distillation`` to get a quant_aware and distillation compress config.
                    The Quantization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L24`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                2. set ``Quantization`` and ``HyperParameterOptimization`` to get a quant_post and hyperparameter optimization compress config.
                    The Quantization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L24`_ .
                    The HyperParameterOptimization config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L73`_ .
                3. set ``ChannelPrune`` and ``Distillation`` to get a channel prune and distillation compress config.
                    The ChannelPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                4. set ``ASPPrune`` and ``Distillation`` to get an ASP prune and distillation compress config.
                    The ASPPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                5. set ``TransformerPrune`` and ``Distillation`` to get a transformer prune and distillation compress config.
                    The TransformerPrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L82`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                6. set ``UnstructurePrune`` and ``Distillation`` to get an unstructure prune and distillation compress config.
                    The UnstructurePrune config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L91`_ .
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                7. set ``Distillation`` to use a single teacher model to distill the student model.
                    The Distillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L39`_ .
                8. set ``MultiTeacherDistillation`` to use multiple teacher models to distill the student model.
                    The MultiTeacherDistillation config can reference `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/strategy_config.py#L56`_ .

                If set to None, a strategy will be chosen automatically. Default: None.
            target_speedup(float, optional): The target speedup ratio of the automatic compression. Default: None.
            eval_callback(function, optional): Eval function defined by yourself that returns the metric of the inference program; it can be used to judge the metric of the compressed model. How to write an eval function is documented in `https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/api_cn/static/auto-compression/custom_function.rst`_ . ``eval_callback`` and ``eval_dataloader`` cannot both be None. Default: None.
            eval_dataloader(paddle.io.Dataloader, optional): The generator or dataloader that provides eval data and
                 returns a batch every time. If eval_dataloader is None, the first 5000 batches of train_dataloader
                 are used as eval_dataloader, and its metric is for reference only. Default: None.
            deploy_hardware(str, optional): The hardware you want to deploy on. Default: 'gpu'.
        """
        self.model_dir = model_dir.rstrip('/')
        self.updated_model_dir, self.model_filename, self.params_filename = get_model_dir(
            model_dir, model_filename, params_filename)

        self.final_dir = save_dir
        if not os.path.exists(self.final_dir):
            os.makedirs(self.final_dir)

        # load config
        if isinstance(config, str):
            config = load_config(config)
            self.train_config = extract_train_config(config)
        elif isinstance(config, dict):
            if 'TrainConfig' in config:
                self.train_config = TrainConfig(**config.pop('TrainConfig'))
            else:
                self.train_config = None
        else:
            self.train_config = None
        self.strategy_config = extract_strategy_config(config)

        # prepare dataloader
        self.feed_vars = get_feed_vars(self.model_dir, model_filename,
                                       params_filename)
        self.train_dataloader = wrap_dataloader(train_dataloader,
                                                self.feed_vars)
        self.eval_dataloader = wrap_dataloader(eval_dataloader, self.feed_vars)
        if self.eval_dataloader is None:
            self.eval_dataloader = self._get_eval_dataloader(
                self.train_dataloader)

        self.target_speedup = target_speedup
        self.eval_function = eval_callback
        self.deploy_hardware = deploy_hardware

        paddle.enable_static()
        self._exe, self._places = self._prepare_envs()
        self.model_type = self._get_model_type()

        if self.train_config is not None and self.train_config.use_fleet:
            fleet.init(is_collective=True)

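        # When the loaded model still has variable (unknown) input shapes and
        # the user supplied concrete ``input_shapes``, re-export the model with
        # those shapes fixed so later strategy preparation can analyze it.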
        if with_variable_shape(
                self.model_dir,
                model_filename=model_filename,
                params_filename=params_filename) and input_shapes is not None:

            infer_shape_model = self.create_tmp_dir(
                self.final_dir, prefix="infer_shape_model_")
            self._infer_shape(self.model_dir, self.model_filename,
                              self.params_filename, input_shapes,
                              infer_shape_model)
            self.model_dir = infer_shape_model
            self.model_filename = "infered_shape.pdmodel"
            self.params_filename = "infered_shape.pdiparams"

        if self.strategy_config is None:
            strategy_config = prepare_strategy(
                self._exe, self._places, self.model_dir, self.model_filename,
                self.params_filename, self.target_speedup, self.deploy_hardware,
                self.model_type)
            self.strategy_config = strategy_config
        elif isinstance(self.strategy_config, dict):
            self.strategy_config = [self.strategy_config]
        elif isinstance(self.strategy_config, str):
            self.strategy_config = create_strategy_config(self.strategy_config,
                                                          self.model_type)

        self._strategy, self._config = self._prepare_strategy(
            self.strategy_config)

        self.train_config = self._get_final_train_config(
            self.train_config, self._strategy, self.model_type)
        _logger.info(f"Selected strategies: {self._strategy}")

    def _get_final_train_config(self, train_config, strategy_config,
                                model_type):
        # If train_config is None, set default train_config
        if train_config is None:
            train_config = create_train_config(strategy_config, model_type)

        train_configs = [train_config]
        for idx in range(1, len(self._strategy)):
            if 'qat' in self._strategy[idx] or 'ptq' in self._strategy[idx]:
                ### If there is more than one compression strategy, the TrainConfig in the yaml is only used for pruning.
                ### The TrainConfig for quantization is derived from it below.
                tmp_train_config = copy.deepcopy(train_config.__dict__)
                ### the epochs, train_iter and learning rate for quantization are 10% of those used for pruning
                if self.model_type != 'transformer':
                    tmp_train_config['epochs'] = max(
                        int(train_config.epochs * 0.1), 1)
                if train_config.train_iter is not None:
                    tmp_train_config['train_iter'] = int(
                        train_config.train_iter * 0.1)
                if isinstance(train_config.learning_rate, float):
                    tmp_train_config[
                        'learning_rate'] = train_config.learning_rate * 0.1
                else:
                    if 'learning_rate' in train_config.learning_rate:
                        tmp_train_config['learning_rate'][
                            'learning_rate'] = train_config.learning_rate[
                                'learning_rate'] * 0.1
                    else:  ### learning rate decay is PiecewiseDecay
                        tmp_train_config['learning_rate']['values'] = list(
                            map(lambda x: x * 0.1, train_config.learning_rate[
                                'values']))
                train_cfg = TrainConfig(**tmp_train_config)
            else:
                tmp_train_config = copy.deepcopy(train_config.__dict__)
                train_cfg = TrainConfig(**tmp_train_config)

            train_configs.append(train_cfg)
        return train_configs

    def _infer_shape(self, model_dir, model_filename, params_filename,
                     input_shapes, save_path):
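        # Overwrite the feed variables' shapes, re-run shape inference for
        # every op, and save the result as a new inference model under
        # ``save_path``.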
        assert type(input_shapes) in [
            dict, list, tuple
        ], f'Type of input_shapes should be in [dict, tuple or list] but got {type(input_shapes)}.'
        paddle.enable_static()
        exe = paddle.static.Executor(paddle.CPUPlace())
        [inference_program, feed_target_names,
         fetch_targets] = load_inference_model(model_dir, exe, model_filename,
                                               params_filename)

        if type(input_shapes) in [list, tuple]:
            assert len(
                feed_target_names
            ) == 1, f"The number of model's inputs should be 1 but got {feed_target_names}."
            input_shapes = {feed_target_names[0]: input_shapes}

        feed_vars = []
        for var_ in inference_program.list_vars():
            if var_.name in feed_target_names:
                feed_vars.append(var_)
                var_.desc.set_shape(input_shapes[var_.name])

        for block in inference_program.blocks:
            for op in block.ops:
                if op.type not in ["feed", "fetch"]:
                    op.desc.infer_shape(block.desc)

        save_path = os.path.join(save_path, "infered_shape")
        os.makedirs(save_path)
        paddle.static.save_inference_model(
            save_path, feed_vars, fetch_targets, exe, program=inference_program)
        _logger.info(f"Saved model infered shape to {save_path}")

    @property
    def deploy_hardware(self):
        return self._deploy_hardware

    @deploy_hardware.setter
    def deploy_hardware(self, value):
        supported_hardware = TableLatencyPredictor.hardware_list + [
            'gpu',  # nvidia gpu
            "cpu",  # intel cpu
        ]
        if value is not None:
            # Fail-fast when deploy hardware is set explicitly
            assert (
                value in supported_hardware
            ), f"Hardware should be in supported list {supported_hardware} but got {value}. Or you can set deploy_hardware None."
        self._deploy_hardware = value

    def _get_eval_dataloader(self, train_dataloader):
        def _gen():
            len_loader = len(list(train_dataloader()))
            ### use at most the first 5000 batches of train_dataloader as eval_dataloader
            slice_len = min(5000, len_loader)
            ret = list(itertools.islice(train_dataloader(), slice_len))
            for i in ret:
                yield i

        return _gen

    def _prepare_envs(self):
        devices = paddle.device.get_device().split(':')[0]
        places = paddle.device._convert_to_place(devices)
        _logger.info(f"devices: {devices}")
        exe = paddle.static.Executor(places)
        return exe, places

    def _get_model_type(self):
        [inference_program, _, _] = (load_inference_model(
            self.model_dir,
            model_filename=self.model_filename,
            params_filename=self.params_filename,
            executor=self._exe))
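        # ``get_patterns`` inspects the graph to detect the model type (e.g.
        # transformer); a copy of the program with training info stripped is
        # written back to ``updated_model_dir`` for the following steps.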
        _, _, model_type = get_patterns(inference_program)
        if self.model_filename is None:
            opt_model_filename = '__opt_model__'
        else:
            opt_model_filename = 'opt_' + self.model_filename
        program_bytes = inference_program._remove_training_info(
            clip_extra=False).desc.serialize_to_string()
        with open(
                os.path.join(self.updated_model_dir, opt_model_filename),
                "wb") as f:
            f.write(program_bytes)
        shutil.move(
            os.path.join(self.updated_model_dir, opt_model_filename),
            os.path.join(self.updated_model_dir, self.model_filename))
        _logger.info(f"Detect model type: {model_type}")
        return model_type

    def _prepare_strategy(self, strategy_config):
        if not isinstance(strategy_config, list):
            strategy_config = [strategy_config]

        strategy = []
        config = []
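        # Map each strategy dict onto a named strategy ('channel_prune_dis',
        # 'ptq_hpo', 'qat_dis', ...) together with its merged config; the
        # quantization strategies are moved to the end further below.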
        for strategy_c in strategy_config:
            quant_config = strategy_c.get("Quantization", None)
            hpo_config = strategy_c.get("HyperParameterOptimization", None)
            prune_config = strategy_c.get("ChannelPrune", None)
            asp_config = strategy_c.get("ASPPrune", None)
            transformer_prune_config = strategy_c.get("TransformerPrune", None)
            unstructure_prune_config = strategy_c.get("UnstructurePrune", None)
            single_teacher_distill_config = strategy_c.get("Distillation", None)
            if single_teacher_distill_config is not None and single_teacher_distill_config.teacher_model_dir is None:
                single_teacher_distill_config.teacher_model_dir = self.model_dir
                single_teacher_distill_config.teacher_model_filename = self.model_filename
                single_teacher_distill_config.teacher_params_filename = self.params_filename

            multi_teacher_distill_config = strategy_c.get(
                "MultiTeacherDistillation", None)

            assert (single_teacher_distill_config is None) or (multi_teacher_distill_config is None), \
                "Distillation and MultiTeacherDistillation cannot be set at the same time."
            self._distill_config = single_teacher_distill_config if \
                   single_teacher_distill_config is not None else \
                   multi_teacher_distill_config

            only_distillation = True

            ### case1: prune_config & distill config
            if prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('channel_prune_dis')
                config.append(merge_config(prune_config, self._distill_config))

            ### case2: asp_config & distill config
            if asp_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('asp_prune_dis')
                config.append(merge_config(asp_config, self._distill_config))

            ### case3: transformer_prune_config & distill config
            if transformer_prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('transformer_prune_dis')
                config.append(
                    merge_config(transformer_prune_config,
                                 self._distill_config))

            ### case4: unstructure_config & distill config
            if unstructure_prune_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('unstructure_prune_dis')
                config.append(
                    merge_config(unstructure_prune_config,
                                 self._distill_config))

            ### case5: quant_config & hpo_config ==> PTQ & HPO
            if quant_config is not None and hpo_config is not None:
                only_distillation = False
                strategy.append('ptq_hpo')
                config.append(merge_config(quant_config, hpo_config))

            ### case6: quant_config & distill config ==> QAT & Distill
            if quant_config is not None and self._distill_config is not None:
                only_distillation = False
                strategy.append('qat_dis')
                config.append(merge_config(quant_config, self._distill_config))

            ### case7: distill_config
            if only_distillation and self._distill_config is not None:
                if single_teacher_distill_config is not None:
                    strategy.append('single_teacher_dis')
                    config.append(single_teacher_distill_config)
                else:
                    strategy.append('multi_teacher_dis')
                    config.append(multi_teacher_distill_config)

        ### NOTE: keep quantization as the last step
        idx = -1
        if 'qat_dis' in strategy and strategy.index('qat_dis') != (
                len(strategy) - 1):
            idx = strategy.index('qat_dis')
        elif 'ptq_hpo' in strategy and strategy.index('ptq_hpo') != (
                len(strategy) - 1):
            idx = strategy.index('ptq_hpo')

        if idx != -1:
            strategy = strategy[:idx] + strategy[idx + 1:] + [strategy[idx]]
            config = config[:idx] + config[idx + 1:] + [config[idx]]

        return strategy, config

    @staticmethod
    def _prepare_fleet_strategy(train_config):
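        # Translate the optional recompute / sharding / amp / asp sections of
        # TrainConfig into a fleet.DistributedStrategy for distributed training.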
        build_strategy = paddle.static.BuildStrategy()
        exec_strategy = paddle.static.ExecutionStrategy()

        strategy = fleet.DistributedStrategy()
        strategy.build_strategy = build_strategy
        if train_config.recompute_config is not None:
            strategy.recompute = True
            strategy.recompute_configs = { ** train_config.recompute_config}
        if train_config.sharding_config is not None:
            strategy.sharding = True
            strategy.sharding_configs = { ** train_config.sharding_config}
        if train_config.amp_config is not None:
            strategy.amp = True
            strategy.amp_configs = { ** train_config.amp_config}
        if train_config.asp_config is not None:
            strategy.asp = True
        return strategy

    def _prepare_program(self, program, feed_target_names, fetch_targets,
                         patterns, default_distill_node_pair, strategy, config,
                         train_config):
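        # Build train/eval programs for one strategy step: recover a trainable
        # program from the inference program, then optionally attach pruning,
        # distillation and quant-aware passes before compiling.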
        train_program = recover_inference_program(program)
        startup_program = paddle.static.Program()
        train_program_info = ProgramInfo(startup_program, train_program,
                                         feed_target_names, fetch_targets)

        config_dict = config.__dict__
        if "prune_strategy" in config_dict and config_dict[
                "prune_strategy"] == "gmp" and config_dict[
                    'gmp_config'] is None:
            _logger.info(
                "Calculating the iterations per epoch……(It will take some time)")
            # NOTE:XXX: This way of calculating the iters needs to be improved.
            if train_config.epochs:
                iters_per_epoch = len(list(self.train_dataloader()))
                total_iters = train_config.epochs * iters_per_epoch
            elif train_config.train_iter:
                total_iters = train_config.train_iter
            else:
                raise RuntimeError(
                    'train_config must have an `epochs` or `train_iter` field.')
            config_dict['gmp_config'] = {
                'stable_iterations': 0,
                'pruning_iterations': max(0.45 * total_iters, 30),
                'tunning_iterations': max(0.45 * total_iters, 30),
                'resume_iteration': -1,
                'pruning_steps': 100 if (0.45 * total_iters) > 1000 else 1,
                'initial_ratio': 0.15,
            }
        ### add prune program
        self._pruner = None
        if 'prune' in strategy:
            self._pruner, train_program_info = build_prune_program(
                self._exe, self._places, config_dict, train_program_info,
                strategy, patterns, self.eval_dataloader)

        if train_config.use_fleet:
            dist_strategy = self._prepare_fleet_strategy(train_config)
        else:
            dist_strategy = None

        ### add distill program
        if 'dis' in strategy:
            train_program_info, test_program_info = build_distill_program(
                self._exe,
                self._places,
                config_dict,
                train_config.__dict__,
                train_program_info,
                pruner=self._pruner,
                dist_strategy=dist_strategy,
                default_distill_node_pair=default_distill_node_pair)

        self._quant_config = None
        ### add quant_aware program; quantization is always the last step
        if 'qat' in strategy:
            train_program_info, test_program_info, self._quant_config = build_quant_program(
                self._exe, self._places, config_dict, train_program_info,
                test_program_info)
        if train_config.sparse_model:
            from ..prune.unstructured_pruner import UnstructuredPruner
            # NOTE: The initialization parameters of this pruner have no effect; it is only used to call the 'set_static_masks' function
            self._pruner = UnstructuredPruner(
                train_program_info.program,
                mode='ratio',
                ratio=0.75,
                prune_params_type='conv1x1_only',
                place=self._places)
            self._pruner.set_static_masks()  # Fixed model sparsity

        self._exe.run(train_program_info.startup_program)

        if (not train_config.use_fleet) and train_config.amp_config is not None:
            if hasattr(
                    train_config.amp_config,
                    'use_pure_fp16') and train_config.amp_config.use_pure_fp16:
                train_program_info.optimizer.amp_init(
                    self._places, scope=paddle.static.global_scope())

        if 'asp' in strategy:
            ### prune weight in scope
            self._pruner.prune_model(train_program_info.program)

        if not train_config.use_fleet:
            train_program_info = self._compiled_program(train_program_info,
                                                        strategy)
            test_program_info = self._compiled_program(test_program_info,
                                                       self._strategy)
        return train_program_info, test_program_info

    def _compiled_program(self, program_info, strategy):
        compiled_prog = paddle.static.CompiledProgram(program_info.program)
        build_strategy = paddle.static.BuildStrategy()
        exec_strategy = paddle.static.ExecutionStrategy()
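        # For quant-aware training the graph-level optimizations below are
        # disabled, presumably because memory reuse and op fusion can conflict
        # with the nodes inserted by the quantization passes.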
        if 'qat' in strategy:
            build_strategy.memory_optimize = False
            build_strategy.enable_inplace = False
            build_strategy.fuse_all_reduce_ops = False
            build_strategy.sync_batch_norm = False

        compiled_prog = compiled_prog.with_data_parallel(
            loss_name=program_info.fetch_targets[0].name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
        program_info.program = compiled_prog
        return program_info

    def create_tmp_dir(self, base_dir, prefix="tmp"):
        # create a new temp directory in final dir
        s_datetime = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
        tmp_base_name = "_".join([prefix, str(os.getpid()), s_datetime])
        tmp_dir = os.path.join(base_dir, tmp_base_name)
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
        return tmp_dir

    def compress(self):
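        # Run every prepared strategy in order; each step writes its model to
        # ``tmp_dir/strategy_<idx>`` and the last result is copied to final_dir.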
        assert len(self._strategy) > 0
        self.tmp_dir = self.create_tmp_dir(self.final_dir)
        strategy = None
        config = None
        train_config = None
        strategy_idx = None
        for strategy_idx, (
                strategy, config, train_config
        ) in enumerate(zip(self._strategy, self._config, self.train_config)):
            self.single_strategy_compress(strategy, config, strategy_idx,
                                          train_config)

        if strategy == 'ptq_hpo' and config.max_quant_count == 1 and platform.system(
        ).lower() == 'linux':
            ptq_loss = post_quant_hpo.g_min_emd_loss

            final_quant_config = get_final_quant_config(ptq_loss,
                                                        self.model_type)
            if final_quant_config is not None:
                quant_strategy, quant_config = self._prepare_strategy(
                    final_quant_config)
                self.single_strategy_compress(quant_strategy[0],
                                              quant_config[0], strategy_idx,
                                              train_config)
        tmp_model_path = os.path.join(
            self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1)))
        final_model_path = os.path.join(self.final_dir)
        if paddle.distributed.get_rank() == 0:
            for _file in os.listdir(tmp_model_path):
                _file_path = os.path.join(tmp_model_path, _file)
                if os.path.isfile(_file_path):
                    shutil.copy(_file_path, final_model_path)
            shutil.rmtree(self.tmp_dir)
            _logger.info(
                "==> The ACT compression has been completed and the final model is saved in `{}`".
                format(final_model_path))

    def single_strategy_compress(self, strategy, config, strategy_idx,
                                 train_config):
        # start compress, including train/eval model
        # TODO: add the emd loss of evaluation model.
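        # Dispatch on the strategy name: plain post-training quantization,
        # post-training quantization with hyper-parameter search (ptq_hpo), or
        # a distillation-based training loop (optionally with prune/QAT passes).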
        if self.updated_model_dir != self.model_dir:
            # If model is ONNX, convert it to inference model firstly.
            load_inference_model(
                self.model_dir,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                executor=self._exe)
        if strategy == 'quant_post':
            quant_post(
                self._exe,
                model_dir=self.updated_model_dir,
                quantize_model_path=os.path.join(
                    self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1))),
                data_loader=self.train_dataloader,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                save_model_filename=self.model_filename,
                save_params_filename=self.params_filename,
                batch_size=1,
                batch_nums=config.batch_num,
                algo=config.ptq_algo,
                round_type='round',
                bias_correct=config.bias_correct,
                hist_percent=config.hist_percent,
                quantizable_op_type=config.quantize_op_types,
                is_full_quantize=config.is_full_quantize,
                weight_bits=config.weight_bits,
                activation_bits=config.activation_bits,
                activation_quantize_type='range_abs_max',
                weight_quantize_type=config.weight_quantize_type,
                onnx_format=False)

        elif strategy == 'ptq_hpo':
            if platform.system().lower() != 'linux':
                raise NotImplementedError(
                    "post-quant-hpo is not support in system other than linux")
            if self.updated_model_dir != self.model_dir:
                # If model is ONNX, convert it to inference model firstly.
                load_inference_model(
                    self.model_dir,
                    model_filename=self.model_filename,
                    params_filename=self.params_filename,
                    executor=self._exe)
            post_quant_hpo.quant_post_hpo(
                self._exe,
                self._places,
                model_dir=self.updated_model_dir,
                quantize_model_path=os.path.join(
                    self.tmp_dir, 'strategy_{}'.format(str(strategy_idx + 1))),
                train_dataloader=self.train_dataloader,
                eval_dataloader=self.eval_dataloader,
                eval_function=self.eval_function,
                model_filename=self.model_filename,
                params_filename=self.params_filename,
                save_model_filename=self.model_filename,
                save_params_filename=self.params_filename,
                quantizable_op_type=config.quantize_op_types,
                weight_bits=config.weight_bits,
                activation_bits=config.activation_bits,
                weight_quantize_type=config.weight_quantize_type,
                is_full_quantize=config.is_full_quantize,
                algo=config.ptq_algo,
                bias_correct=config.bias_correct,
                hist_percent=config.hist_percent,
                batch_size=[1],
                batch_num=config.batch_num,
                runcount_limit=config.max_quant_count)

        else:
            assert 'dis' in strategy, "Only support optimizing the compressed model with distillation loss."

            if strategy_idx == 0:
                model_dir = self.model_dir
            else:
                model_dir = os.path.join(
                    self.tmp_dir, 'strategy_{}'.format(str(strategy_idx)))

            [inference_program, feed_target_names, fetch_targets]= load_inference_model( \
                model_dir, \
                model_filename=self.model_filename, params_filename=self.params_filename,
                executor=self._exe)

            ### used to check whether the dataloader is right
            self.metric_before_compressed = None
            if self.eval_function is not None and train_config.origin_metric is not None:
                _logger.info("start to test metric before compress")
C
ceci3 已提交
684 685 686 687
                metric = self.eval_function(self._exe, inference_program,
                                            feed_target_names, fetch_targets)
                _logger.info("metric of compressed model is: {}".format(metric))
                buf = 0.05
                if metric < (float(train_config.origin_metric) - buf) or \
                        metric > (float(train_config.origin_metric) + buf):
                    raise RuntimeError("target metric of pretrained model is {}, \
                          but now is {}, Please check the format of evaluation dataset \
                          or check the origin_metric in train_config"
                                                                     .format(\
                          train_config.origin_metric, metric))
                self.metric_before_compressed = metric

            patterns, default_distill_node_pair, _ = get_patterns(
                inference_program)

            train_program_info, test_program_info = self._prepare_program(
                inference_program, feed_target_names, fetch_targets, patterns,
                default_distill_node_pair, strategy, config, train_config)
            if 'unstructure' in self._strategy:
                test_program_info.program._program = remove_unused_var_nodes(
                    test_program_info.program._program)
            test_program_info = self._start_train(
                train_program_info, test_program_info, strategy, train_config)
            self._save_model(test_program_info, strategy, strategy_idx)

    def _start_train(self, train_program_info, test_program_info, strategy,
                     train_config):
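        # Training loop: run the compression program, step the LR schedule and
        # pruner, evaluate every ``eval_iter`` steps, keep the best checkpoint
        # and stop early once the target metric (or a metric within 0.5% of the
        # original one) is reached.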
        best_metric = -1.0
        total_epochs = train_config.epochs if train_config.epochs else 100
        total_train_iter = 0
        for epoch_id in range(total_epochs):
            for batch_id, data in enumerate(self.train_dataloader()):
                np_probs_float, = self._exe.run(train_program_info.program, \
                    feed=data, \
                    fetch_list=train_program_info.fetch_targets)
                if not isinstance(train_program_info.learning_rate, float):
                    train_program_info.learning_rate.step()
                if 'unstructure' in strategy:
                    self._pruner.step()

                if train_config.logging_iter is None:
                    logging_iter = 10
                else:
                    logging_iter = train_config.logging_iter
                if batch_id % int(logging_iter) == 0:
                    _logger.info(
                        "Total iter: {}, epoch: {}, batch: {}, loss: {}".format(
                            total_train_iter, epoch_id, batch_id,
                            np_probs_float))
                total_train_iter += 1
                if total_train_iter % int(
                        train_config.eval_iter) == 0 and total_train_iter != 0:
                    if self.eval_function is not None:

                        # GMP pruner step 3: update params before summarizing sparsity, saving the model or evaluating.
                        if 'unstructure' in strategy:
                            self._pruner.update_params()

                        metric = self.eval_function(
                            self._exe, test_program_info.program,
                            test_program_info.feed_target_names,
                            test_program_info.fetch_targets)

                        if metric > best_metric:
                            paddle.static.save(
                                program=test_program_info.program._program,
                                model_path=os.path.join(self.tmp_dir,
                                                        'best_model'))
                            best_metric = metric
                            _logger.info(
                                "epoch: {} metric of compressed model is: {:.6f}, best metric of compressed model is {:.6f}".
                                format(epoch_id, metric, best_metric))
                            if self.metric_before_compressed is not None and float(
                                    abs(best_metric -
                                        self.metric_before_compressed)
                            ) / self.metric_before_compressed <= 0.005:
                                break
                        else:
                            _logger.info(
                                "epoch: {} metric of compressed model is: {:.6f}, best metric of compressed model is {:.6f}".
                                format(epoch_id, metric, best_metric))
                        if train_config.target_metric is not None:
                            if metric > float(train_config.target_metric):
                                break

                    else:
                        _logger.warning(
                            "Not set eval function, so unable to test accuracy performance."
                        )
                if train_config.train_iter and total_train_iter >= train_config.train_iter:
                    epoch_id = total_epochs
                    break

        if 'unstructure' in self._strategy or train_config.sparse_model:
            self._pruner.update_params()

        return test_program_info

    def _save_model(self, test_program_info, strategy, strategy_idx):
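        # Restore the best checkpoint if one was saved, convert quant-aware
        # programs back to an inference graph, and export the result with
        # ``paddle.static.save_inference_model``.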
        test_program = test_program_info.program._program if isinstance(
            test_program_info.program,
            paddle.static.CompiledProgram) else test_program_info.program

        if os.path.exists(os.path.join(self.tmp_dir, 'best_model.pdparams')):
            paddle.static.load(test_program,
                               os.path.join(self.tmp_dir, 'best_model'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdmodel'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdopt'))
            os.remove(os.path.join(self.tmp_dir, 'best_model.pdparams'))

        model_dir = os.path.join(self.tmp_dir,
                                 'strategy_{}'.format(str(strategy_idx + 1)))
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        if 'qat' in strategy:
            test_program = convert(
                test_program,
                self._places,
                self._quant_config,
                scope=paddle.static.global_scope())

        feed_vars = [
            test_program.global_block().var(name)
            for name in test_program_info.feed_target_names
        ]

        model_name = None
        if self.model_filename is None:
            model_name = "model"
        elif self.model_filename.endswith(".pdmodel"):
            model_name = self.model_filename.rsplit(".", 1)[0]
        else:
            model_name = self.model_filename

        path_prefix = os.path.join(model_dir, model_name)
        paddle.static.save_inference_model(
            path_prefix=path_prefix,
            feed_vars=feed_vars,
            fetch_vars=test_program_info.fetch_targets,
            executor=self._exe,
            program=test_program)