Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleClas
提交
75a20ba5
P
PaddleClas
项目概览
PaddlePaddle
/
PaddleClas
大约 1 年 前同步成功
通知
115
Star
4999
Fork
1114
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
19
列表
看板
标记
里程碑
合并请求
6
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleClas
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
19
Issue
19
列表
看板
标记
里程碑
合并请求
6
合并请求
6
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
75a20ba5
编写于
2月 22, 2023
作者:
G
gaotingquan
提交者:
Wei Shengyu
3月 10, 2023
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
refactor: add ClassModel to unify model forward interface
上级
376d83d4
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
47 additions
and
36 deletions
+47
-36
ppcls/arch/__init__.py
ppcls/arch/__init__.py
+43
-16
ppcls/engine/engine.py
ppcls/engine/engine.py
+2
-11
ppcls/engine/train/train.py
ppcls/engine/train/train.py
+2
-9
未找到文件。
ppcls/arch/__init__.py
浏览文件 @
75a20ba5
...
...
@@ -12,14 +12,14 @@
#See the License for the specific language governing permissions and
#limitations under the License.
import
sys
import
copy
import
importlib
import
paddle.nn
as
nn
from
paddle.jit
import
to_static
from
paddle.static
import
InputSpec
from
.
import
backbone
,
gears
from
.backbone
import
*
from
.
import
backbone
as
backbone_zoo
from
.gears
import
build_gear
from
.utils
import
*
from
.backbone.base.theseus_layer
import
TheseusLayer
...
...
def build_model(config, mode="train"):
    """Build the model described by ``config["Arch"]``.

    Args:
        config: full config dict; reads ``"Arch"`` (architecture spec) and
            ``config["Global"]["device"]`` (to decide SyncBatchNorm conversion).
        mode: training phase string, forwarded to ``quantize_model``.

    Returns:
        A paddle ``nn.Layer``. Plain backbones are wrapped in ``ClassModel``
        so every model exposes the unified ``forward(batch)`` interface.
    """
    arch_config = copy.deepcopy(config["Arch"])
    model_type = arch_config.pop("name")
    use_sync_bn = arch_config.pop("use_sync_bn", False)

    if hasattr(backbone_zoo, model_type):
        # A bare backbone name: wrap it so forward() takes a (data, label) batch.
        model = ClassModel(model_type, **arch_config)
    else:
        # A composite arch (RecModel, DistillationModel, ...) defined in this
        # module.
        # NOTE(review): the extra "ClassModel" positional argument looks
        # suspicious — RecModel.__init__ accepts only **config; confirm the
        # intended call signature against the callee.
        model = getattr(sys.modules[__name__],
                        model_type)("ClassModel", **arch_config)

    if use_sync_bn:
        if config["Global"]["device"] == "gpu":
            model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        else:
            # SyncBatchNorm is GPU-only; warn instead of failing.
            msg = "SyncBatchNorm can only be used on GPU device. The related setting has been ignored."
            logger.warning(msg)

    if isinstance(model, TheseusLayer):
        prune_model(config, model)
        quantize_model(config, model, mode)

    # set @to_static for benchmark, skip this by default.
    model = apply_to_static(config, model)

    return model
def
apply_to_static
(
config
,
model
):
...
...
@@ -65,12 +73,29 @@ def apply_to_static(config, model):
return
model
# TODO(gaotingquan): export model
class ClassModel(TheseusLayer):
    """Wrap a bare backbone so ``forward`` consumes a (data, label) batch.

    This unifies the forward interface of plain classification backbones
    with the composite models (RecModel, DistillationModel, ...), which
    already take the whole batch.
    """

    def __init__(self, model_type, **config):
        super().__init__()
        if model_type == "ClassModel":
            # Config style: {"Backbone": {"name": ..., <backbone kwargs>}}
            backbone_config = config["Backbone"]
            backbone_name = backbone_config.pop("name")
        else:
            # model_type itself names the backbone; config are its kwargs.
            backbone_name, backbone_config = model_type, config
        self.backbone = getattr(backbone_zoo, backbone_name)(**backbone_config)

    def forward(self, batch):
        # The label (batch[1]) is accepted for interface parity but unused.
        x, label = batch[0], batch[1]
        return self.backbone(x)
class
RecModel
(
TheseusLayer
):
def
__init__
(
self
,
**
config
):
super
().
__init__
()
backbone_config
=
config
[
"Backbone"
]
backbone_name
=
backbone_config
.
pop
(
"name"
)
self
.
backbone
=
eval
(
backbone_name
)(
**
backbone_config
)
self
.
backbone
=
getattr
(
backbone_zoo
,
backbone_name
)(
**
backbone_config
)
self
.
head_feature_from
=
config
.
get
(
'head_feature_from'
,
'neck'
)
if
"BackboneStopLayer"
in
config
:
...
...
@@ -87,8 +112,8 @@ class RecModel(TheseusLayer):
else
:
self
.
head
=
None
def
forward
(
self
,
x
,
label
=
None
):
def
forward
(
self
,
batch
):
x
,
label
=
batch
[
0
],
batch
[
1
]
out
=
dict
()
x
=
self
.
backbone
(
x
)
out
[
"backbone"
]
=
x
...
...
@@ -140,7 +165,8 @@ class DistillationModel(nn.Layer):
load_dygraph_pretrain
(
self
.
model_name_list
[
idx
],
path
=
pretrained
)
def
forward
(
self
,
x
,
label
=
None
):
def
forward
(
self
,
batch
):
x
,
label
=
batch
[
0
],
batch
[
1
]
result_dict
=
dict
()
for
idx
,
model_name
in
enumerate
(
self
.
model_name_list
):
if
label
is
None
:
...
...
@@ -158,7 +184,8 @@ class AttentionModel(DistillationModel):
**
kargs
):
super
().
__init__
(
models
,
pretrained_list
,
freeze_params_list
,
**
kargs
)
def
forward
(
self
,
x
,
label
=
None
):
def
forward
(
self
,
batch
):
x
,
label
=
batch
[
0
],
batch
[
1
]
result_dict
=
dict
()
out
=
x
for
idx
,
model_name
in
enumerate
(
self
.
model_name_list
):
...
...
@@ -168,4 +195,4 @@ class AttentionModel(DistillationModel):
else
:
out
=
self
.
model_list
[
idx
](
out
,
label
)
result_dict
.
update
(
out
)
return
result_dict
\ No newline at end of file
return
result_dict
ppcls/engine/engine.py
浏览文件 @
75a20ba5
...
...
@@ -28,7 +28,6 @@ from ppcls.utils.logger import init_logger
from
ppcls.utils.config
import
print_config
from
ppcls.data
import
build_dataloader
from
ppcls.arch
import
build_model
,
RecModel
,
DistillationModel
,
TheseusLayer
from
ppcls.arch
import
apply_to_static
from
ppcls.loss
import
build_loss
from
ppcls.metric
import
build_metrics
from
ppcls.optimizer
import
build_optimizer
...
...
@@ -57,18 +56,10 @@ class Engine(object):
# init logger
init_logger
(
self
.
config
,
mode
=
mode
)
print_config
(
config
)
# for visualdl
self
.
vdl_writer
=
self
.
_init_vdl
()
# is_rec
if
"Head"
in
self
.
config
[
"Arch"
]
or
self
.
config
[
"Arch"
].
get
(
"is_rec"
,
False
):
self
.
is_rec
=
True
else
:
self
.
is_rec
=
False
# init train_func and eval_func
self
.
train_mode
=
self
.
config
[
"Global"
].
get
(
"train_mode"
,
None
)
if
self
.
train_mode
is
None
:
...
...
@@ -108,8 +99,6 @@ class Engine(object):
# build model
self
.
model
=
build_model
(
self
.
config
,
self
.
mode
)
# set @to_static for benchmark, skip this by default.
apply_to_static
(
self
.
config
,
self
.
model
)
# load_pretrain
self
.
_init_pretrained
()
...
...
@@ -125,6 +114,8 @@ class Engine(object):
# for distributed
self
.
_init_dist
()
print_config
(
config
)
def
train
(
self
):
assert
self
.
mode
==
"train"
print_batch_step
=
self
.
config
[
'Global'
][
'print_batch_step'
]
...
...
ppcls/engine/train/train.py
浏览文件 @
75a20ba5
...
...
@@ -55,10 +55,10 @@ def train_epoch(engine, epoch_id, print_batch_step):
"flatten_contiguous_range"
,
"greater_than"
},
level
=
amp_level
):
out
=
forward
(
engine
,
batch
)
out
=
engine
.
model
(
batch
)
loss_dict
=
engine
.
train_loss_func
(
out
,
batch
[
1
])
else
:
out
=
forward
(
engine
,
batch
)
out
=
engine
.
model
(
batch
)
loss_dict
=
engine
.
train_loss_func
(
out
,
batch
[
1
])
# loss
...
...
@@ -104,10 +104,3 @@ def train_epoch(engine, epoch_id, print_batch_step):
if
getattr
(
engine
.
lr_sch
[
i
],
"by_epoch"
,
False
)
and
\
type_name
(
engine
.
lr_sch
[
i
])
!=
"ReduceOnPlateau"
:
engine
.
lr_sch
[
i
].
step
()
def forward(engine, batch):
    """Dispatch one batch through ``engine.model``.

    Recognition models consume (input, label); plain classification
    models consume only the input tensor.
    """
    if engine.is_rec:
        return engine.model(batch[0], batch[1])
    return engine.model(batch[0])
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录