Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
bf1d0031
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
bf1d0031
编写于
4月 28, 2020
作者:
M
mindspore-ci-bot
提交者:
Gitee
4月 28, 2020
浏览文件
操作
浏览文件
下载
差异文件
!820 Update document about dynamic_lr
Merge pull request !820 from fanglei/r0.2
上级
3183579e
ba7ccf26
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
7 additions
and
6 deletions
+7
-6
mindspore/nn/dynamic_lr.py
mindspore/nn/dynamic_lr.py
+7
-6
未找到文件。
mindspore/nn/dynamic_lr.py
浏览文件 @
bf1d0031
...
...
@@ -32,6 +32,7 @@ def piecewise_constant_lr(milestone, learning_rates):
Args:
milestone (Union[list[int], tuple[int]]): A list of milestone. This list is a monotone increasing list.
Every element is a milestone step, and must be greater than 0.
learning_rates (Union[list[float], tuple[float]]): A list of learning rates.
Returns:
...
...
@@ -40,7 +41,7 @@ def piecewise_constant_lr(milestone, learning_rates):
Examples:
>>> milestone = [2, 5, 10]
>>> learning_rates = [0.1, 0.05, 0.01]
>>> lr = piecewise_constant_lr(milestone, learning_rates)
>>> piecewise_constant_lr(milestone, learning_rates)
[0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
"""
validator.check_value_type('milestone', milestone, (tuple, list), None)
...
...
@@ -100,7 +101,7 @@ def exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch,
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 1
>>> lr = exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch)
>>> exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch)
[0.1, 0.1, 0.09000000000000001, 0.09000000000000001, 0.08100000000000002, 0.08100000000000002]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
...
...
@@ -142,7 +143,7 @@ def natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch,
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> lr = natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
[0.1, 0.1, 0.1, 0.1, 0.016529888822158657, 0.016529888822158657]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
...
...
@@ -185,7 +186,7 @@ def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, deca
>>> total_step = 6
>>> step_per_epoch = 1
>>> decay_epoch = 1
>>> lr = inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
[0.1, 0.06666666666666667, 0.05, 0.04, 0.03333333333333333, 0.028571428571428574]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
...
...
@@ -227,7 +228,7 @@ def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> lr = cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
>>> cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
[0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]
"""
validator.check_float_positive('min_lr', min_lr, None)
...
...
@@ -282,7 +283,7 @@ def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_e
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> power = 0.5
>>> lr = polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
>>> polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
[0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]
"""
validator.check_float_positive('learning_rate', learning_rate, None)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录