From 4ab7c7c079b9b4713c39e93e172e9e1475504bd6 Mon Sep 17 00:00:00 2001 From: Chang Xu Date: Thu, 23 Dec 2021 21:16:44 +0800 Subject: [PATCH] add ofa picodet demo (#4923) --- configs/slim/ofa/ofa_picodet_demo.yml | 85 +++++++++++++++++++++++++ ppdet/engine/trainer.py | 6 +- ppdet/slim/__init__.py | 9 +++ ppdet/slim/ofa.py | 89 +++++++++++++++++++++++++++ 4 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 configs/slim/ofa/ofa_picodet_demo.yml create mode 100644 ppdet/slim/ofa.py diff --git a/configs/slim/ofa/ofa_picodet_demo.yml b/configs/slim/ofa/ofa_picodet_demo.yml new file mode 100644 index 000000000..b770918c8 --- /dev/null +++ b/configs/slim/ofa/ofa_picodet_demo.yml @@ -0,0 +1,85 @@ +weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams +slim: OFA +OFA: + ofa_config: + task: expand_ratio + expand_ratio: [0.5, 1] + + skip_neck: True + skip_head: True + + RunConfig: + # Skip the output layer of each block by layer name + skip_layers: ['backbone._conv1._conv','backbone.2_1._conv_linear_1._conv', + 'backbone.2_1._conv_linear_2._conv', 'backbone.2_1._conv_dw_mv1._conv', + 'backbone.2_1._conv_pw_mv1._conv', 'backbone.2_2._conv_linear._conv', + 'backbone.2_3._conv_linear._conv', 'backbone.3_1._conv_linear_1._conv', + 'backbone.3_1._conv_linear_2._conv', 'backbone.3_1._conv_dw_mv1._conv', + 'backbone.3_1._conv_pw_mv1._conv', 'backbone.3_2._conv_linear._conv', + 'backbone.3_3._conv_linear._conv', 'backbone.3_4._conv_linear._conv', + 'backbone.3_5._conv_linear._conv', 'backbone.3_6._conv_linear._conv', + 'backbone.3_7._conv_linear._conv', 'backbone.4_1._conv_linear_1._conv', + 'backbone.4_1._conv_linear_2._conv', 'backbone.4_1._conv_dw_mv1._conv', + 'backbone.4_1._conv_pw_mv1._conv', 'backbone.4_2._conv_linear._conv', + 'backbone.4_3._conv_linear._conv'] + + # For block-wise search, make layers in each block in the same search space + same_search_space: [
 +    ['backbone.2_1._conv_dw_1._conv', 
'backbone.2_1._conv_pw_2._conv', + 'backbone.2_1._conv_dw_2._conv', 'backbone.2_1._se.conv1', 'backbone.2_1._se.conv2'], + ['backbone.2_2._conv_pw._conv', 'backbone.2_2._conv_dw._conv', + 'backbone.2_2._se.conv1', 'backbone.2_2._se.conv2'], + ['backbone.2_3._conv_pw._conv', 'backbone.2_3._conv_dw._conv', + 'backbone.2_3._se.conv1', 'backbone.2_3._se.conv2'], + ['backbone.3_1._conv_dw_1._conv', 'backbone.3_1._conv_pw_2._conv', + 'backbone.3_1._conv_dw_2._conv', 'backbone.3_1._se.conv1', 'backbone.3_1._se.conv2'], + ['backbone.3_2._conv_pw._conv', 'backbone.3_2._conv_dw._conv', + 'backbone.3_2._se.conv1', 'backbone.3_2._se.conv2'], + ['backbone.3_3._conv_pw._conv', 'backbone.3_3._conv_dw._conv', + 'backbone.3_3._se.conv1', 'backbone.3_3._se.conv2'], + ['backbone.3_4._conv_pw._conv', 'backbone.3_4._conv_dw._conv', + 'backbone.3_4._se.conv1', 'backbone.3_4._se.conv2'], + ['backbone.3_5._conv_pw._conv', 'backbone.3_5._conv_dw._conv', + 'backbone.3_5._se.conv1', 'backbone.3_5._se.conv2'], + ['backbone.3_6._conv_pw._conv', 'backbone.3_6._conv_dw._conv', + 'backbone.3_6._se.conv1', 'backbone.3_6._se.conv2'], + ['backbone.3_7._conv_pw._conv', 'backbone.3_7._conv_dw._conv', + 'backbone.3_7._se.conv1', 'backbone.3_7._se.conv2'], + ['backbone.4_1._conv_dw_1._conv', 'backbone.4_1._conv_pw_2._conv', + 'backbone.4_1._conv_dw_2._conv', 'backbone.4_1._se.conv1', 'backbone.4_1._se.conv2'], + ['backbone.4_2._conv_pw._conv', 'backbone.4_2._conv_dw._conv', + 'backbone.4_2._se.conv1', 'backbone.4_2._se.conv2'], + ['backbone.4_3._conv_pw._conv', 'backbone.4_3._conv_dw._conv', + 'backbone.4_3._se.conv1', 'backbone.4_3._se.conv2']] + + # demo expand ratio + # Generally, for expand ratio, float in (0, 1] is available. + # But please be careful if the model is complicated. + # For picodet, there are many split and concat, the choice of channel number is important. 
+ ofa_layers: + 'backbone.2_1._conv_dw_1._conv': + 'expand_ratio': [0.5, 1] + 'backbone.2_2._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.2_3._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_1._conv_dw_1._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_2._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_3._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_4._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_5._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_6._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.3_7._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.4_1._conv_dw_1._conv': + 'expand_ratio': [0.5, 1] + 'backbone.4_2._conv_pw._conv': + 'expand_ratio': [0.5, 1] + 'backbone.4_3._conv_pw._conv': + 'expand_ratio': [0.5, 1] diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py index 24f846b28..157be4e31 100644 --- a/ppdet/engine/trainer.py +++ b/ppdet/engine/trainer.py @@ -95,7 +95,11 @@ class Trainer(object): self.is_loaded_weights = True #normalize params for deploy - self.model.load_meanstd(cfg['TestReader']['sample_transforms']) + if 'slim' in cfg and cfg['slim_type'] == 'OFA': + self.model.model.load_meanstd(cfg['TestReader'][ + 'sample_transforms']) + else: + self.model.load_meanstd(cfg['TestReader']['sample_transforms']) self.use_ema = ('use_ema' in cfg and cfg['use_ema']) if self.use_ema: diff --git a/ppdet/slim/__init__.py b/ppdet/slim/__init__.py index dc22d0717..e71481d1c 100644 --- a/ppdet/slim/__init__.py +++ b/ppdet/slim/__init__.py @@ -21,6 +21,7 @@ from .prune import * from .quant import * from .distill import * from .unstructured_prune import * +from .ofa import * import yaml from ppdet.core.workspace import load_config @@ -36,6 +37,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'): if slim_load_cfg['slim'] == 'Distill': model = DistillModel(cfg, slim_cfg) cfg['model'] = model + elif slim_load_cfg['slim'] == 'OFA': + load_config(slim_cfg) + model = 
create(cfg.architecture) + load_pretrain_weight(model, cfg.weights) + slim = create(cfg.slim) + cfg['slim_type'] = cfg.slim + cfg['model'] = slim(model, model.state_dict()) + cfg['slim'] = slim elif slim_load_cfg['slim'] == 'DistillPrune': if mode == 'train': model = DistillModel(cfg, slim_cfg) diff --git a/ppdet/slim/ofa.py b/ppdet/slim/ofa.py new file mode 100644 index 000000000..b75edacdf --- /dev/null +++ b/ppdet/slim/ofa.py @@ -0,0 +1,89 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppdet.core.workspace import load_config, merge_config, create +from ppdet.utils.checkpoint import load_weight, load_pretrain_weight +from ppdet.utils.logger import setup_logger +from ppdet.core.workspace import register, serializable + +from paddle.utils import try_import + +logger = setup_logger(__name__) + + +@register +@serializable +class OFA(object): + def __init__(self, ofa_config): + super(OFA, self).__init__() + self.ofa_config = ofa_config + + def __call__(self, model, param_state_dict): + + paddleslim = try_import('paddleslim') + from paddleslim.nas.ofa import OFA, RunConfig, utils + from paddleslim.nas.ofa.convert_super import Convert, supernet + task = self.ofa_config['task'] + expand_ratio = self.ofa_config['expand_ratio'] + + skip_neck = self.ofa_config['skip_neck'] + skip_head = self.ofa_config['skip_head'] + + run_config = self.ofa_config['RunConfig'] + if 'skip_layers' in run_config: + skip_layers = run_config['skip_layers'] + else: + skip_layers = [] + + # supernet config + sp_config = supernet(expand_ratio=expand_ratio) + # convert to supernet + model = Convert(sp_config).convert(model) + + skip_names = [] + if skip_neck: + skip_names.append('neck.') + if skip_head: + skip_names.append('head.') + + for name, sublayer in model.named_sublayers(): + for n in skip_names: + if n in name: + skip_layers.append(name) + + 
run_config['skip_layers'] = skip_layers + run_config = RunConfig(**run_config) + + # build ofa model + ofa_model = OFA(model, run_config=run_config) + + ofa_model.set_epoch(0) + ofa_model.set_task(task) + + input_spec = [{ + "image": paddle.ones( + shape=[1, 3, 640, 640], dtype='float32'), + "im_shape": paddle.full( + [1, 2], 640, dtype='float32'), + "scale_factor": paddle.ones( + shape=[1, 2], dtype='float32') + }] + + ofa_model._clear_search_space(input_spec=input_spec) + ofa_model._build_ss = True + check_ss = ofa_model._sample_config('expand_ratio', phase=None) + # tokenize the search space + ofa_model.tokenize() + # check token map, search cands and search space + logger.info('Token map is {}'.format(ofa_model.token_map)) + logger.info('Search candidates is {}'.format(ofa_model.search_cands)) + logger.info('The length of search_space is {}, search_space is {}'. + format(len(ofa_model._ofa_layers), ofa_model._ofa_layers)) + # set model state dict into ofa model + utils.set_state_dict(ofa_model.model, param_state_dict) + return ofa_model -- GitLab