PaddlePaddle / PaddleClas
Commit 790815f4
Authored May 05, 2022 by HydrogenSulfate

refine code and docs

Parent: 1c31010b
Showing 5 changed files with 11 additions and 29 deletions (+11 −29)
ppcls/arch/utils.py                      +4  −4
ppcls/data/preprocess/ops/operators.py   +0  −1
ppcls/loss/centerloss.py                 +3  −2
ppcls/optimizer/__init__.py              +4  −4
ppcls/optimizer/learning_rate.py         +0  −18
ppcls/arch/utils.py
@@ -33,8 +33,8 @@ def get_architectures():
 def get_blacklist_model_in_static_mode():
-    from ppcls.arch.backbone import (distilled_vision_transformer,
-                                     vision_transformer)
+    from ppcls.arch.backbone import distilled_vision_transformer
+    from ppcls.arch.backbone import vision_transformer
     blacklist = distilled_vision_transformer.__all__ + vision_transformer.__all__
     return blacklist
@@ -60,10 +60,10 @@ def get_param_attr_dict(ParamAttr_config: Union[None, bool, Dict[str, Dict]]
     """parse ParamAttr from an dict
     Args:
-        ParamAttr_config (Union[bool, Dict[str, Dict]]): ParamAttr_config
+        ParamAttr_config (Union[None, bool, Dict[str, Dict]]): ParamAttr configure
     Returns:
-        Union[bool, paddle.ParamAttr]: Generated ParamAttr
+        Union[None, bool, paddle.ParamAttr]: Generated ParamAttr
     """
     if ParamAttr_config is None:
         return None
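Note: the corrected docstring describes a None/bool/dict contract for get_param_attr_dict. A minimal sketch of that contract, assuming the dict form maps directly onto paddle.ParamAttr keyword arguments (an illustration, not the exact PaddleClas implementation):

import paddle
from typing import Dict, Union

def get_param_attr_sketch(cfg: Union[None, bool, Dict]) -> Union[None, bool, "paddle.ParamAttr"]:
    """Illustrative only: map a config value onto a ParamAttr."""
    if cfg is None:
        return None  # keep the framework default, as in the diff above
    if isinstance(cfg, bool):
        return cfg   # True/False passes straight through
    # assumed dict form, e.g. {"learning_rate": 0.1, "trainable": True}
    return paddle.ParamAttr(**cfg)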
ppcls/data/preprocess/ops/operators.py
@@ -22,7 +22,6 @@ import six
 import math
 import random
 import cv2
-from typing import Sequence
 import numpy as np
 from PIL import Image, ImageOps, __version__ as PILLOW_VERSION
 from paddle.vision.transforms import ColorJitter as RawColorJitter
ppcls/loss/centerloss.py
@@ -23,8 +23,9 @@ import paddle.nn as nn
 class CenterLoss(nn.Layer):
-    """Center loss class
+    """Center loss
+    paper : [A Discriminative Feature Learning Approach for Deep Face Recognition](https://link.springer.com/content/pdf/10.1007%2F978-3-319-46478-7_31.pdf)
+    code reference: https://github.com/michuanhaohao/reid-strong-baseline/blob/master/layers/center_loss.py#L7
     Args:
         num_classes (int): number of classes.
         feat_dim (int): number of feature dimensions.
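Note: the newly cited paper defines center loss as half the mean squared distance between each feature and its class center. A minimal sketch of that idea, assuming features of shape [batch, feat_dim], integer labels of shape [batch], and a centers table of shape [num_classes, feat_dim] (illustrative, not the PaddleClas CenterLoss implementation):

import paddle

def center_loss_sketch(features, labels, centers):
    # pick each sample's class center along axis 0: [batch, feat_dim]
    batch_centers = paddle.gather(centers, labels)
    # 1/2 * mean squared Euclidean distance to the assigned center
    return 0.5 * paddle.mean(
        paddle.sum(paddle.square(features - batch_centers), axis=1))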
ppcls/optimizer/__init__.py
@@ -71,7 +71,7 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         optim_cfg = optim_item[optim_name]  # get optim_cfg
         lr = build_lr_scheduler(optim_cfg.pop('lr'), epochs, step_each_epoch)
-        logger.info("build lr ({}) for scope ({}) success..".format(
+        logger.debug("build lr ({}) for scope ({}) success..".format(
             lr, optim_scope))
         # step2 build regularization
         if 'regularizer' in optim_cfg and optim_cfg['regularizer'] is not None:
@@ -83,8 +83,8 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
             reg_name = reg_config.pop('name') + 'Decay'
             reg = getattr(paddle.regularizer, reg_name)(**reg_config)
             optim_cfg["weight_decay"] = reg
-            logger.info("build regularizer ({}) for scope ({}) success..".
-                        format(reg, optim_scope))
+            logger.debug("build regularizer ({}) for scope ({}) success..".
+                         format(reg, optim_scope))
         # step3 build optimizer
         if 'clip_norm' in optim_cfg:
             clip_norm = optim_cfg.pop('clip_norm')
@@ -123,7 +123,7 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         optim = getattr(optimizer, optim_name)(learning_rate=lr,
                                                grad_clip=grad_clip,
                                                **optim_cfg)(model_list=optim_model)
-        logger.info("build optimizer ({}) for scope ({}) success..".format(
+        logger.debug("build optimizer ({}) for scope ({}) success..".format(
             optim, optim_scope))
         optim_list.append(optim)
         lr_list.append(lr)
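Note: the three numbered steps above consume one optim_item entry whose name keys its settings, popping off the nested lr, regularizer, and clip_norm entries in turn. A hypothetical entry of that shape (field values are illustrative, not taken from this commit; real PaddleClas configs are written in YAML):

optim_item = {
    "Momentum": {  # optim_name selects the optimizer class
        "momentum": 0.9,
        # popped by step 1 and handed to build_lr_scheduler
        "lr": {"name": "Cosine", "learning_rate": 0.1},
        # step 2: 'L2' + 'Decay' -> paddle.regularizer.L2Decay(coeff=1e-4)
        "regularizer": {"name": "L2", "coeff": 1e-4},
        # step 3: popped to build a gradient-clipping rule
        "clip_norm": 10.0,
    }
}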
ppcls/optimizer/learning_rate.py
@@ -262,24 +262,6 @@ class Piecewise(object):
         return learning_rate
 
 
-class Constant(LRScheduler):
-    """
-    Constant learning rate
-    Args:
-        lr (float): The initial learning rate. It is a python float number.
-        last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
-    """
-
-    def __init__(self, learning_rate, last_epoch=-1, by_epoch=False,
-                 **kwargs):
-        self.learning_rate = learning_rate
-        self.last_epoch = last_epoch
-        self.by_epoch = by_epoch
-        super().__init__()
-
-    def get_lr(self):
-        return self.learning_rate
-
 class MultiStepDecay(LRScheduler):
     """
     Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones.
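Note: the deleted Constant scheduler always returned its initial rate. For orientation, Paddle optimizers accept a plain float learning rate with the same fixed-rate behavior, so a constant rate needs no scheduler class at all (illustrative sketch):

import paddle

# A float learning_rate stays constant for the whole run, matching what
# the removed Constant.get_lr() returned.
linear = paddle.nn.Linear(4, 2)
opt = paddle.optimizer.Momentum(
    learning_rate=0.01, momentum=0.9, parameters=linear.parameters())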