Unverified commit 6375ad39
Authored Oct 20, 2020 by Kaipeng Deng
Committed by GitHub on Oct 20, 2020
[cherry-pick] lr scheduler epoch2step (#28056)
* hapi/model step learning rate on batch end. test=develop
Parent: 11adb0f3
Showing 2 changed files with 73 additions and 1 deletion (+73 -1)
python/paddle/hapi/model.py        +13  -0
python/paddle/tests/test_model.py  +60  -1
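In user terms, the cherry-pick makes the hapi Model adapters call LRScheduler.step() once at the end of every batch (the "epoch2step" in the title suggests the scheduler previously only advanced at epoch granularity). Below is a minimal, hypothetical usage sketch of the behavior being changed, assuming the paddle 2.0-rc API used in the diff; the Linear network and StepDecay schedule are stand-ins, not part of this commit:

    import paddle
    from paddle.static import InputSpec

    # stand-in network and input specs (hypothetical, for illustration only)
    net = paddle.nn.Linear(20, 10)
    inputs = [InputSpec([None, 20], 'float32', 'x')]
    labels = [InputSpec([None, 1], 'int64', 'label')]

    # a scheduler-backed learning rate; with this commit the hapi adapters
    # call scheduler.step() at the end of every batch
    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=1e-3, step_size=10, gamma=0.5)
    optim = paddle.optimizer.Momentum(
        learning_rate=scheduler, momentum=0.9, parameters=net.parameters())

    model = paddle.Model(net, inputs, labels)
    model.prepare(optimizer=optim, loss=paddle.nn.CrossEntropyLoss())
    # model.fit(train_dataset, ...) now advances `scheduler` once per completed batch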
python/paddle/hapi/model.py

@@ -453,6 +453,12 @@ class StaticGraphAdapter(object):
            if len(name) > 0:
                rets.insert(i, feed[name])

        # step learning rate scheduler on each batch end
        if self.model._optimizer and \
                isinstance(self.model._optimizer._learning_rate,
                           paddle.optimizer.lr.LRScheduler):
            self.model._optimizer._learning_rate.step()

        # LoDTensor cannot be fetch as numpy directly
        rets = [np.array(v) for v in rets]
        if self.mode == 'test':
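The new guard only fires when the optimizer's _learning_rate is a scheduler object; a plain float learning rate is left untouched. A rough illustration of that distinction, with hypothetical optimizers that are not part of this commit:

    import paddle

    net = paddle.nn.Linear(20, 10)  # hypothetical network

    # plain float learning rate: _learning_rate stays a Python float,
    # so the guard above is False and nothing extra happens per batch
    opt_plain = paddle.optimizer.SGD(learning_rate=0.01, parameters=net.parameters())
    print(isinstance(opt_plain._learning_rate, paddle.optimizer.lr.LRScheduler))  # False

    # scheduler-backed learning rate: the guard is True, so the adapter
    # calls _learning_rate.step() at the end of each batch
    sched = paddle.optimizer.lr.StepDecay(learning_rate=0.01, step_size=2, gamma=0.1)
    opt_sched = paddle.optimizer.SGD(learning_rate=sched, parameters=net.parameters())
    print(isinstance(opt_sched._learning_rate, paddle.optimizer.lr.LRScheduler))  # True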
@@ -652,6 +658,13 @@ class DynamicGraphAdapter(object):
        self.model._optimizer.minimize(final_loss)
        self.model.network.clear_gradients()

        # step learning rate scheduler on each batch end
        if self.model._optimizer and \
                isinstance(self.model._optimizer._learning_rate,
                           paddle.optimizer.lr.LRScheduler):
            self.model._optimizer._learning_rate.step()

        metrics = []
        for metric in self.model._metrics:
            metric_outs = metric.compute(*(to_list(outputs) + labels))
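The DynamicGraphAdapter hunk mirrors the static one: after minimize() and clear_gradients(), the scheduler advances once per batch. The loop below is a simplified, hypothetical equivalent of that sequence written as plain dygraph code (random data, a small Linear net, not the actual hapi internals):

    import numpy as np
    import paddle

    paddle.set_device('cpu')
    net = paddle.nn.Linear(20, 10)  # hypothetical network
    sched = paddle.optimizer.lr.StepDecay(learning_rate=1e-3, step_size=4, gamma=0.5)
    opt = paddle.optimizer.Momentum(learning_rate=sched, momentum=0.9,
                                    parameters=net.parameters())
    loss_fn = paddle.nn.CrossEntropyLoss()

    for batch_id in range(8):  # pretend mini-batches
        x = paddle.to_tensor(np.random.random((4, 20)).astype('float32'))
        y = paddle.to_tensor(np.random.randint(0, 10, (4, 1)).astype('int64'))
        loss = loss_fn(net(x), y)
        loss.backward()
        opt.minimize(loss)       # mirrors self.model._optimizer.minimize(final_loss)
        net.clear_gradients()    # mirrors self.model.network.clear_gradients()
        sched.step()             # the per-batch step this commit adds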
python/paddle/tests/test_model.py

@@ -33,7 +33,7 @@ from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.metric import Accuracy
from paddle.vision.datasets import MNIST
from paddle.vision.models import LeNet
-from paddle.io import DistributedBatchSampler
+from paddle.io import DistributedBatchSampler, Dataset
from paddle.hapi.model import prepare_distributed_context
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
@@ -295,6 +295,15 @@ class MyModel(paddle.nn.Layer):
        return y


class MyDataset(Dataset):
    def __getitem__(self, idx):
        return np.random.random(size=(20,)).astype(np.float32), \
            np.random.randint(0, 10, size=(1,)).astype(np.int64)

    def __len__(self):
        return 40


class TestModelFunction(unittest.TestCase):
    def set_seed(self, seed=1024):
        paddle.manual_seed(seed)
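For context, MyDataset above returns one random 20-dim float32 feature and one int64 label per index, so a batch size of 4 matches the InputSpec shapes used later in the test. A quick sanity-check sketch of how it batches through paddle.io.DataLoader (not part of the test file):

    import numpy as np
    import paddle
    from paddle.io import DataLoader, Dataset


    class MyDataset(Dataset):  # same definition as in the hunk above
        def __getitem__(self, idx):
            return np.random.random(size=(20,)).astype(np.float32), \
                np.random.randint(0, 10, size=(1,)).astype(np.int64)

        def __len__(self):
            return 40


    loader = DataLoader(MyDataset(), batch_size=4, num_workers=0)
    for x, y in loader:
        print(x.shape, x.dtype)  # expect [4, 20], float32
        print(y.shape, y.dtype)  # expect [4, 1], int64
        break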
@@ -599,6 +608,56 @@ class TestModelFunction(unittest.TestCase):
            shutil.rmtree(save_dir)


class TestModelWithLRScheduler(unittest.TestCase):
    def test_fit(self):
        def make_optimizer(parameters=None):
            base_lr = 1e-3
            momentum = 0.9
            weight_decay = 5e-4
            boundaries = [5, 8]
            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=boundaries, values=values)
            learning_rate = paddle.optimizer.lr.LinearWarmup(
                learning_rate=learning_rate,
                warmup_steps=4,
                start_lr=base_lr / 5.,
                end_lr=base_lr,
                verbose=True)
            optimizer = paddle.optimizer.Momentum(
                learning_rate=learning_rate,
                weight_decay=weight_decay,
                momentum=momentum,
                parameters=parameters)
            return optimizer

        # dynamic test
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        # static test
        paddle.enable_static()
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)


class TestRaiseError(unittest.TestCase):
    def test_input_without_name(self):
        net = MyModel()
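make_optimizer in the hunk above chains a PiecewiseDecay schedule inside a LinearWarmup. Since the resulting scheduler is now advanced by Model.fit once per batch, warmup_steps=4 and boundaries=[5, 8] are effectively measured in batches rather than epochs. A standalone sketch that evaluates the same schedule directly, outside Model.fit (the 12-step range is arbitrary):

    import paddle

    # same constants as make_optimizer above
    base_lr = 1e-3
    boundaries = [5, 8]
    values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]

    sched = paddle.optimizer.lr.LinearWarmup(
        learning_rate=paddle.optimizer.lr.PiecewiseDecay(
            boundaries=boundaries, values=values),
        warmup_steps=4,
        start_lr=base_lr / 5.,
        end_lr=base_lr)

    for step in range(12):
        # get_lr() is what an attached optimizer would use at this point;
        # Model.fit now calls sched.step() at the end of every batch
        print(step, sched.get_lr())
        sched.step()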