未验证 提交 4ab7c7c0 编写于 作者: C Chang Xu 提交者: GitHub

add ofa picodet demo (#4923)

上级 14eedfa3
weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams
slim: OFA
OFA:
ofa_config:
task: expand_ratio
expand_ratio: [0.5, 1]
skip_neck: True
skip_head: True
RunConfig:
# Skip the output layer of each block by layer name
skip_layers: ['backbone._conv1._conv','backbone.2_1._conv_linear_1._conv',
'backbone.2_1._conv_linear_2._conv', 'backbone.2_1._conv_dw_mv1._conv',
'backbone.2_1._conv_pw_mv1._conv', 'backbone.2_2._conv_linear._conv',
'backbone.2_3._conv_linear._conv', 'backbone.3_1._conv_linear_1._conv',
'backbone.3_1._conv_linear_2._conv', 'backbone.3_1._conv_dw_mv1._conv',
'backbone.3_1._conv_pw_mv1._conv', 'backbone.3_2._conv_linear._conv',
'backbone.3_3._conv_linear._conv', 'backbone.3_4._conv_linear._conv',
'backbone.3_5._conv_linear._conv', 'backbone.3_6._conv_linear._conv',
'backbone.3_7._conv_linear._conv', 'backbone.4_1._conv_linear_1._conv',
'backbone.4_1._conv_linear_2._conv', 'backbone.4_1._conv_dw_mv1._conv',
'backbone.4_1._conv_pw_mv1._conv', 'backbone.4_2._conv_linear._conv',
'backbone.4_3._conv_linear._conv']
# For block-wise search, put the layers of each block into the same search space
same_search_space: [
['backbone.2_1._conv_dw_1._conv', 'backbone.2_1._conv_pw_2._conv',
'backbone.2_1._conv_dw_2._conv', 'backbone.2_1._se.conv1', 'backbone.2_1._se.conv2'],
['backbone.2_2._conv_pw._conv', 'backbone.2_2._conv_dw._conv',
'backbone.2_2._se.conv1', 'backbone.2_2._se.conv2'],
['backbone.2_3._conv_pw._conv', 'backbone.2_3._conv_dw._conv',
'backbone.2_3._se.conv1', 'backbone.2_3._se.conv2'],
['backbone.3_1._conv_dw_1._conv', 'backbone.3_1._conv_pw_2._conv',
'backbone.3_1._conv_dw_2._conv', 'backbone.3_1._se.conv1', 'backbone.3_1._se.conv2'],
['backbone.3_2._conv_pw._conv', 'backbone.3_2._conv_dw._conv',
'backbone.3_2._se.conv1', 'backbone.3_2._se.conv2'],
['backbone.3_3._conv_pw._conv', 'backbone.3_3._conv_dw._conv',
'backbone.3_3._se.conv1', 'backbone.3_3._se.conv2'],
['backbone.3_4._conv_pw._conv', 'backbone.3_4._conv_dw._conv',
'backbone.3_4._se.conv1', 'backbone.3_4._se.conv2'],
['backbone.3_5._conv_pw._conv', 'backbone.3_5._conv_dw._conv',
'backbone.3_5._se.conv1', 'backbone.3_5._se.conv2'],
['backbone.3_6._conv_pw._conv', 'backbone.3_6._conv_dw._conv',
'backbone.3_6._se.conv1', 'backbone.3_6._se.conv2'],
['backbone.3_7._conv_pw._conv', 'backbone.3_7._conv_dw._conv',
'backbone.3_7._se.conv1', 'backbone.3_7._se.conv2'],
['backbone.4_1._conv_dw_1._conv', 'backbone.4_1._conv_pw_2._conv',
'backbone.4_1._conv_dw_2._conv', 'backbone.4_1._se.conv1', 'backbone.4_1._se.conv2'],
['backbone.4_2._conv_pw._conv', 'backbone.4_2._conv_dw._conv',
'backbone.4_2._se.conv1', 'backbone.4_2._se.conv2'],
['backbone.4_3._conv_pw._conv', 'backbone.4_3._conv_dw._conv',
'backbone.4_3._se.conv1', 'backbone.4_3._se.conv2']]
# demo expand ratio
# Generally, any float in (0, 1] is a valid expand ratio.
# But please be careful if the model is complicated.
# For picodet, there are many splits and concats, so the choice of channel numbers is important.
ofa_layers:
'backbone.2_1._conv_dw_1._conv':
'expand_ratio': [0.5, 1]
'backbone.2_2._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.2_3._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_1._conv_dw_1._conv':
'expand_ratio': [0.5, 1]
'backbone.3_2._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_3._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_4._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_5._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_6._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.3_7._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.4_1._conv_dw_1._conv':
'expand_ratio': [0.5, 1]
'backbone.4_2._conv_pw._conv':
'expand_ratio': [0.5, 1]
'backbone.4_3._conv_pw._conv':
'expand_ratio': [0.5, 1]
...@@ -95,6 +95,10 @@ class Trainer(object): ...@@ -95,6 +95,10 @@ class Trainer(object):
self.is_loaded_weights = True self.is_loaded_weights = True
#normalize params for deploy #normalize params for deploy
if 'slim' in cfg and cfg['slim_type'] == 'OFA':
self.model.model.load_meanstd(cfg['TestReader'][
'sample_transforms'])
else:
self.model.load_meanstd(cfg['TestReader']['sample_transforms']) self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
self.use_ema = ('use_ema' in cfg and cfg['use_ema']) self.use_ema = ('use_ema' in cfg and cfg['use_ema'])
......
...@@ -21,6 +21,7 @@ from .prune import * ...@@ -21,6 +21,7 @@ from .prune import *
from .quant import * from .quant import *
from .distill import * from .distill import *
from .unstructured_prune import * from .unstructured_prune import *
from .ofa import *
import yaml import yaml
from ppdet.core.workspace import load_config from ppdet.core.workspace import load_config
...@@ -36,6 +37,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'): ...@@ -36,6 +37,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'):
if slim_load_cfg['slim'] == 'Distill': if slim_load_cfg['slim'] == 'Distill':
model = DistillModel(cfg, slim_cfg) model = DistillModel(cfg, slim_cfg)
cfg['model'] = model cfg['model'] = model
elif slim_load_cfg['slim'] == 'OFA':
load_config(slim_cfg)
model = create(cfg.architecture)
load_pretrain_weight(model, cfg.weights)
slim = create(cfg.slim)
cfg['slim_type'] = cfg.slim
cfg['model'] = slim(model, model.state_dict())
cfg['slim'] = slim
elif slim_load_cfg['slim'] == 'DistillPrune': elif slim_load_cfg['slim'] == 'DistillPrune':
if mode == 'train': if mode == 'train':
model = DistillModel(cfg, slim_cfg) model = DistillModel(cfg, slim_cfg)
......
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.utils.logger import setup_logger
from ppdet.core.workspace import register, serializable
from paddle.utils import try_import
logger = setup_logger(__name__)
@register
@serializable
class OFA(object):
    """Once-For-All (OFA) slim strategy wrapper.

    Wraps a ppdet detection model into a paddleslim OFA supernet so it can
    be trained with elastic ``expand_ratio`` search.

    Args:
        ofa_config (dict): configuration dict with keys
            ``task`` (str): OFA task name, e.g. ``'expand_ratio'``.
            ``expand_ratio`` (list): candidate expand ratios for the supernet.
            ``skip_neck`` (bool): if True, exclude all ``neck.*`` layers
                from the search space.
            ``skip_head`` (bool): if True, exclude all ``head.*`` layers
                from the search space.
            ``RunConfig`` (dict): kwargs for paddleslim ``RunConfig``;
                may contain an initial ``skip_layers`` list.
    """

    def __init__(self, ofa_config):
        super(OFA, self).__init__()
        self.ofa_config = ofa_config

    def __call__(self, model, param_state_dict):
        """Convert ``model`` to an OFA supernet and load its weights.

        Args:
            model: a ppdet architecture instance (paddle.nn.Layer).
            param_state_dict: pretrained state dict to set into the
                converted supernet.

        Returns:
            The paddleslim OFA model wrapping the converted supernet.
        """
        # Fail early with a helpful message if paddleslim is missing.
        try_import('paddleslim')
        # Alias the paddleslim class so it does not shadow this class's name.
        from paddleslim.nas.ofa import OFA as SlimOFA, RunConfig, utils
        from paddleslim.nas.ofa.convert_super import Convert, supernet

        task = self.ofa_config['task']
        expand_ratio = self.ofa_config['expand_ratio']
        skip_neck = self.ofa_config['skip_neck']
        skip_head = self.ofa_config['skip_head']

        # Work on copies so we never mutate the user-supplied config dict
        # (the original code appended to ofa_config['RunConfig']['skip_layers']
        # in place, which corrupts the config on repeated calls).
        run_config = dict(self.ofa_config['RunConfig'])
        skip_layers = list(run_config.get('skip_layers', []))

        # Build the supernet config and convert the model to a supernet.
        sp_config = supernet(expand_ratio=expand_ratio)
        model = Convert(sp_config).convert(model)

        # Collect layer-name prefixes to exclude from the search space.
        skip_names = []
        if skip_neck:
            skip_names.append('neck.')
        if skip_head:
            skip_names.append('head.')
        for name, _sublayer in model.named_sublayers():
            # Append each matching layer name at most once.
            if any(prefix in name for prefix in skip_names):
                skip_layers.append(name)
        run_config['skip_layers'] = skip_layers
        run_config = RunConfig(**run_config)

        # Build the OFA model around the supernet.
        ofa_model = SlimOFA(model, run_config=run_config)
        ofa_model.set_epoch(0)
        ofa_model.set_task(task)

        # Dummy detector inputs used only to trace and prune the search space.
        # NOTE(review): assumes a 640x640 input resolution — matches the
        # picodet demo config; confirm if reused for other models.
        input_spec = [{
            "image": paddle.ones(
                shape=[1, 3, 640, 640], dtype='float32'),
            "im_shape": paddle.full(
                [1, 2], 640, dtype='float32'),
            "scale_factor": paddle.ones(
                shape=[1, 2], dtype='float32')
        }]
        ofa_model._clear_search_space(input_spec=input_spec)
        ofa_model._build_ss = True
        # Sample once to validate the search space is consistent; the
        # sampled config itself is not needed.
        ofa_model._sample_config('expand_ratio', phase=None)

        # Tokenize the search space, then log it for inspection.
        ofa_model.tokenize()
        logger.info('Token map is {}'.format(ofa_model.token_map))
        logger.info('Search candidates is {}'.format(ofa_model.search_cands))
        logger.info('The length of search_space is {}, search_space is {}'.
                    format(len(ofa_model._ofa_layers), ofa_model._ofa_layers))

        # Load the pretrained weights into the converted supernet.
        utils.set_state_dict(ofa_model.model, param_state_dict)
        return ofa_model
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册