s920243400 / PaddleDetection (fork of PaddlePaddle / PaddleDetection)
Commit 4ab7c7c0 (unverified)
Authored Dec 23, 2021 by Chang Xu; committed via GitHub on Dec 23, 2021

add ofa picodet demo (#4923)

Parent: 14eedfa3
Showing 4 changed files with 188 additions and 1 deletion (+188 −1)
configs/slim/ofa/ofa_picodet_demo.yml   +85  −0
ppdet/engine/trainer.py                  +5  −1
ppdet/slim/__init__.py                   +9  −0
ppdet/slim/ofa.py                       +89  −0
configs/slim/ofa/ofa_picodet_demo.yml (new file, mode 100644)
weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams
slim: OFA

OFA:
  ofa_config:
    task: expand_ratio
    expand_ratio: [0.5, 1]
    skip_neck: True
    skip_head: True
    RunConfig:
      # Skip the output layer of each block by layer name
      skip_layers: ['backbone._conv1._conv',
                    'backbone.2_1._conv_linear_1._conv', 'backbone.2_1._conv_linear_2._conv',
                    'backbone.2_1._conv_dw_mv1._conv', 'backbone.2_1._conv_pw_mv1._conv',
                    'backbone.2_2._conv_linear._conv', 'backbone.2_3._conv_linear._conv',
                    'backbone.3_1._conv_linear_1._conv', 'backbone.3_1._conv_linear_2._conv',
                    'backbone.3_1._conv_dw_mv1._conv', 'backbone.3_1._conv_pw_mv1._conv',
                    'backbone.3_2._conv_linear._conv', 'backbone.3_3._conv_linear._conv',
                    'backbone.3_4._conv_linear._conv', 'backbone.3_5._conv_linear._conv',
                    'backbone.3_6._conv_linear._conv', 'backbone.3_7._conv_linear._conv',
                    'backbone.4_1._conv_linear_1._conv', 'backbone.4_1._conv_linear_2._conv',
                    'backbone.4_1._conv_dw_mv1._conv', 'backbone.4_1._conv_pw_mv1._conv',
                    'backbone.4_2._conv_linear._conv', 'backbone.4_3._conv_linear._conv']
      # For block-wise search, keep the layers of each block in the same search space
      same_search_space: [
          ['backbone.2_1._conv_dw_1._conv', 'backbone.2_1._conv_pw_2._conv',
           'backbone.2_1._conv_dw_2._conv', 'backbone.2_1._se.conv1', 'backbone.2_1._se.conv2'],
          ['backbone.2_2._conv_pw._conv', 'backbone.2_2._conv_dw._conv',
           'backbone.2_2._se.conv1', 'backbone.2_2._se.conv2'],
          ['backbone.2_3._conv_pw._conv', 'backbone.2_3._conv_dw._conv',
           'backbone.2_3._se.conv1', 'backbone.2_3._se.conv2'],
          ['backbone.3_1._conv_dw_1._conv', 'backbone.3_1._conv_pw_2._conv',
           'backbone.3_1._conv_dw_2._conv', 'backbone.3_1._se.conv1', 'backbone.3_1._se.conv2'],
          ['backbone.3_2._conv_pw._conv', 'backbone.3_2._conv_dw._conv',
           'backbone.3_2._se.conv1', 'backbone.3_2._se.conv2'],
          ['backbone.3_3._conv_pw._conv', 'backbone.3_3._conv_dw._conv',
           'backbone.3_3._se.conv1', 'backbone.3_3._se.conv2'],
          ['backbone.3_4._conv_pw._conv', 'backbone.3_4._conv_dw._conv',
           'backbone.3_4._se.conv1', 'backbone.3_4._se.conv2'],
          ['backbone.3_5._conv_pw._conv', 'backbone.3_5._conv_dw._conv',
           'backbone.3_5._se.conv1', 'backbone.3_5._se.conv2'],
          ['backbone.3_6._conv_pw._conv', 'backbone.3_6._conv_dw._conv',
           'backbone.3_6._se.conv1', 'backbone.3_6._se.conv2'],
          ['backbone.3_7._conv_pw._conv', 'backbone.3_7._conv_dw._conv',
           'backbone.3_7._se.conv1', 'backbone.3_7._se.conv2'],
          ['backbone.4_1._conv_dw_1._conv', 'backbone.4_1._conv_pw_2._conv',
           'backbone.4_1._conv_dw_2._conv', 'backbone.4_1._se.conv1', 'backbone.4_1._se.conv2'],
          ['backbone.4_2._conv_pw._conv', 'backbone.4_2._conv_dw._conv',
           'backbone.4_2._se.conv1', 'backbone.4_2._se.conv2'],
          ['backbone.4_3._conv_pw._conv', 'backbone.4_3._conv_dw._conv',
           'backbone.4_3._se.conv1', 'backbone.4_3._se.conv2']]
      # Demo expand ratios.
      # Generally, any float in (0, 1] is a valid expand ratio, but be careful
      # if the model is complicated: PicoDet has many splits and concats, so
      # the choice of channel numbers matters.
      ofa_layers:
        'backbone.2_1._conv_dw_1._conv':
          'expand_ratio': [0.5, 1]
        'backbone.2_2._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.2_3._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_1._conv_dw_1._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_2._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_3._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_4._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_5._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_6._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.3_7._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.4_1._conv_dw_1._conv':
          'expand_ratio': [0.5, 1]
        'backbone.4_2._conv_pw._conv':
          'expand_ratio': [0.5, 1]
        'backbone.4_3._conv_pw._conv':
          'expand_ratio': [0.5, 1]
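For orientation, a minimal sketch (not part of the commit) of how the keys in this config map onto what the OFA slim wrapper in ppdet/slim/ofa.py (added below) reads; the inline YAML is a trimmed excerpt of the file above, and standard PyYAML is assumed.

# Minimal sketch: parse a trimmed excerpt of the config above and show the
# exact keys that OFA.__call__ in ppdet/slim/ofa.py looks up.
import yaml

excerpt = """
OFA:
  ofa_config:
    task: expand_ratio
    expand_ratio: [0.5, 1]
    skip_neck: True
    skip_head: True
    RunConfig:
      skip_layers: ['backbone._conv1._conv']
"""

ofa_config = yaml.safe_load(excerpt)['OFA']['ofa_config']
print(ofa_config['task'])                      # 'expand_ratio'
print(ofa_config['expand_ratio'])              # [0.5, 1]
print(ofa_config['RunConfig']['skip_layers'])  # layers excluded from the search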
ppdet/engine/trainer.py

@@ -95,7 +95,11 @@ class Trainer(object):
             self.is_loaded_weights = True
 
         #normalize params for deploy
-        self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
+        if 'slim' in cfg and cfg['slim_type'] == 'OFA':
+            self.model.model.load_meanstd(cfg['TestReader'][
+                'sample_transforms'])
+        else:
+            self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
 
         self.use_ema = ('use_ema' in cfg and cfg['use_ema'])
         if self.use_ema:
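A gloss on the branch above (mine, not the commit's): with an OFA slim config, cfg['model'] holds the paddleslim OFA wrapper rather than the bare detector, and the wrapped detector is exposed one attribute deeper as .model, so load_meanstd has to be called on self.model.model. A toy sketch with hypothetical stand-in classes:

# Toy sketch (hypothetical classes, not PaddleDetection code): the OFA wrapper
# exposes the original detector as `.model`, so detector-only methods such as
# load_meanstd live one level down.
class FakeDetector:
    def load_meanstd(self, sample_transforms):
        print('normalizing with', sample_transforms)

class FakeOFAWrapper:
    def __init__(self, model):
        self.model = model  # wrapped detector, mirroring paddleslim's OFA.model

wrapped = FakeOFAWrapper(FakeDetector())
wrapped.model.load_meanstd([{'NormalizeImage': {'mean': [0.485, 0.456, 0.406]}}])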
ppdet/slim/__init__.py

@@ -21,6 +21,7 @@ from .prune import *
 from .quant import *
 from .distill import *
 from .unstructured_prune import *
+from .ofa import *
 
 import yaml
 from ppdet.core.workspace import load_config
@@ -36,6 +37,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'):
     if slim_load_cfg['slim'] == 'Distill':
         model = DistillModel(cfg, slim_cfg)
         cfg['model'] = model
+    elif slim_load_cfg['slim'] == 'OFA':
+        load_config(slim_cfg)
+        model = create(cfg.architecture)
+        load_pretrain_weight(model, cfg.weights)
+        slim = create(cfg.slim)
+        cfg['slim_type'] = cfg.slim
+        cfg['model'] = slim(model, model.state_dict())
+        cfg['slim'] = slim
     elif slim_load_cfg['slim'] == 'DistillPrune':
         if mode == 'train':
             model = DistillModel(cfg, slim_cfg)
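For context, a hedged usage sketch (not part of the diff) of how this new branch is reached when training with a slim config; the detector config path is illustrative, and build_slim_model is assumed to return the updated cfg as it does for the other slim types:

# Hedged usage sketch: how the new 'OFA' branch of build_slim_model is reached.
from ppdet.core.workspace import load_config
from ppdet.slim import build_slim_model

cfg = load_config('configs/picodet/picodet_s_416_coco.yml')  # illustrative path
cfg = build_slim_model(cfg, 'configs/slim/ofa/ofa_picodet_demo.yml', mode='train')

# After this call, cfg['model'] is the paddleslim OFA supernet wrapping the
# PicoDet detector and cfg['slim_type'] == 'OFA', which is what the Trainer
# change above keys on.
print(cfg['slim_type'])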
ppdet/slim/ofa.py (new file, mode 100644)

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.utils.logger import setup_logger
from ppdet.core.workspace import register, serializable
from paddle.utils import try_import

logger = setup_logger(__name__)


@register
@serializable
class OFA(object):
    def __init__(self, ofa_config):
        super(OFA, self).__init__()
        self.ofa_config = ofa_config

    def __call__(self, model, param_state_dict):
        paddleslim = try_import('paddleslim')
        from paddleslim.nas.ofa import OFA, RunConfig, utils
        from paddleslim.nas.ofa.convert_super import Convert, supernet

        task = self.ofa_config['task']
        expand_ratio = self.ofa_config['expand_ratio']

        skip_neck = self.ofa_config['skip_neck']
        skip_head = self.ofa_config['skip_head']

        run_config = self.ofa_config['RunConfig']
        if 'skip_layers' in run_config:
            skip_layers = run_config['skip_layers']
        else:
            skip_layers = []

        # supernet config
        sp_config = supernet(expand_ratio=expand_ratio)
        # convert to supernet
        model = Convert(sp_config).convert(model)

        skip_names = []
        if skip_neck:
            skip_names.append('neck.')
        if skip_head:
            skip_names.append('head.')

        for name, sublayer in model.named_sublayers():
            for n in skip_names:
                if n in name:
                    skip_layers.append(name)
        run_config['skip_layers'] = skip_layers
        run_config = RunConfig(**run_config)

        # build ofa model
        ofa_model = OFA(model, run_config=run_config)

        ofa_model.set_epoch(0)
        ofa_model.set_task(task)

        input_spec = [{
            "image": paddle.ones(
                shape=[1, 3, 640, 640], dtype='float32'),
            "im_shape": paddle.full(
                [1, 2], 640, dtype='float32'),
            "scale_factor": paddle.ones(
                shape=[1, 2], dtype='float32')
        }]

        ofa_model._clear_search_space(input_spec=input_spec)
        ofa_model._build_ss = True
        check_ss = ofa_model._sample_config('expand_ratio', phase=None)
        # tokenize the search space
        ofa_model.tokenize()
        # check token map, search cands and search space
        logger.info('Token map is {}'.format(ofa_model.token_map))
        logger.info('Search candidates is {}'.format(ofa_model.search_cands))
        logger.info('The length of search_space is {}, search_space is {}'.
                    format(len(ofa_model._ofa_layers), ofa_model._ofa_layers))
        # set model state dict into ofa model
        utils.set_state_dict(ofa_model.model, param_state_dict)
        return ofa_model
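To make the conversion steps concrete, a small self-contained sketch (a toy two-conv network, not PicoDet; assumes paddleslim is installed and that RunConfig accepts a skip_layers-only dict, as the config above relies on): supernet declares the elastic expand_ratio dimension, Convert rewrites the layers into their super counterparts, and the paddleslim OFA wrapper then manages sub-network sampling.

# Self-contained sketch of the supernet/Convert/OFA calls used above, applied
# to a toy network instead of PicoDet.
import paddle
import paddle.nn as nn
from paddleslim.nas.ofa import OFA, RunConfig
from paddleslim.nas.ofa.convert_super import Convert, supernet

class TinyNet(nn.Layer):
    def __init__(self):
        super(TinyNet, self).__init__()
        self.conv1 = nn.Conv2D(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2D(16, 32, 3, padding=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))

# declare the elastic dimension, then rewrite the layers into super layers
sp_config = supernet(expand_ratio=[0.5, 1])
super_model = Convert(sp_config).convert(TinyNet())

# wrap the supernet; skip_layers is empty because nothing is excluded here
ofa_model = OFA(super_model, run_config=RunConfig(**{'skip_layers': []}))
ofa_model.set_epoch(0)
ofa_model.set_task('expand_ratio')

# list the converted sublayers, mirroring the named_sublayers loop above
for name, _ in super_model.named_sublayers():
    print(name)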