PaddlePaddle / DeepSpeech
Commit 3912c255
Authored Aug 05, 2021 by Hui Zhang

support noam lr and opt

Parent: 1cd4d4bf
Showing 3 changed files with 23 additions and 2 deletions:

- deepspeech/exps/u2/model.py (+12, -0)
- deepspeech/training/optimizer.py (+3, -0)
- deepspeech/utils/dynamic_import.py (+8, -2)
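The commit routes a new `noam` choice through the existing scheduler and optimizer factories. For orientation, the Noam schedule (from "Attention Is All You Need") scales the learning rate by d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5). Below is a minimal sketch of that formula with illustrative values; `noam_lr` is a hypothetical helper, and the commit itself relies on Paddle's built-in `paddle.optimizer.lr.NoamDecay` rather than hand-rolling it:

```python
# Hypothetical helper for illustration only; not part of the commit.
def noam_lr(step: int, d_model: int, warmup_steps: int, base_lr: float = 1.0) -> float:
    step = max(step, 1)  # guard against step 0
    return base_lr * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)

# Warmup then decay: the rate rises linearly up to warmup_steps,
# then falls off proportionally to step^-0.5.
for step in (100, 25000, 100000):
    print(step, noam_lr(step, d_model=256, warmup_steps=25000))
```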
deepspeech/exps/u2/model.py
```diff
@@ -324,6 +324,9 @@ class U2Trainer(Trainer):
             if "warmup_steps" in scheduler_conf else None,
             "gamma": scheduler_conf.lr_decay
             if "lr_decay" in scheduler_conf else None,
+            "d_model":
+            model_conf.encoder_conf.output_size
+            if scheduler_type == "noam" else None,
         }
         lr_scheduler = LRSchedulerFactory.from_args(scheduler_type, scheduler_args)
```
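The pattern here is a superset argument dict: every scheduler option is present, and options that do not apply to the chosen scheduler are set to None so they can be dropped before construction. A sketch of the shape of this call, with invented config values standing in for `optim_conf` and `scheduler_conf`:

```python
scheduler_type = "noam"  # illustrative; read from config in the real code
scheduler_args = {
    "learning_rate": 1.0,
    "warmup_steps": 25000,
    "gamma": None,  # only meaningful for exponential-decay schedulers
    "d_model": 256 if scheduler_type == "noam" else None,
}
# The factory then dispatches on the type name, as in the diff:
# lr_scheduler = LRSchedulerFactory.from_args(scheduler_type, scheduler_args)
```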
```diff
@@ -338,6 +341,12 @@ class U2Trainer(Trainer):
         #     learning_rate=optim_conf.lr,
         #     warmup_steps=scheduler_conf.warmup_steps,
         #     verbose=False)
+        # elif scheduler_type == 'noam':
+        #     lr_scheduler = paddle.optimizer.lr.NoamDecay(
+        #         learning_rate=optim_conf.lr,
+        #         d_model=model_conf.encoder_conf.output_size,
+        #         warmup_steps=scheduler_conf.warmup_steps,
+        #         verbose=False)
         # else:
         #     raise ValueError(f"Not support scheduler: {scheduler_type}")
```
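The commented-out block records the direct Paddle construction that the factory now performs for the `noam` branch. A runnable equivalent, with illustrative stand-ins for the config values:

```python
import paddle

# Direct construction of the scheduler the 'noam' branch selects.
# d_model and warmup_steps are example values, not the project's config.
lr_scheduler = paddle.optimizer.lr.NoamDecay(
    d_model=256, warmup_steps=25000, learning_rate=1.0, verbose=False)
lr_scheduler.step()
print(lr_scheduler.get_lr())
```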
```diff
@@ -356,6 +365,9 @@ class U2Trainer(Trainer):
             "learning_rate": lr_scheduler if lr_scheduler else optim_conf.lr,
             "parameters": parameters,
+            "epsilon": 1e-9 if optim_type == 'noam' else None,
+            "beta1": 0.9 if optim_type == 'noam' else None,
+            "beta2": 0.98 if optim_type == 'noam' else None,
         }
         optimzer_args = optimizer_args(config, model.parameters(), lr_scheduler)
```
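With `optim_type == 'noam'`, the argument dict pins Adam to the transformer-paper hyperparameters (beta1 = 0.9, beta2 = 0.98, epsilon = 1e-9); for any other optimizer those keys stay None and are filtered out later. Roughly equivalent to the following sketch, with an illustrative stand-in model:

```python
import paddle

model = paddle.nn.Linear(256, 256)  # stand-in for the real U2 model
scheduler = paddle.optimizer.lr.NoamDecay(d_model=256, warmup_steps=25000)
optimizer = paddle.optimizer.Adam(
    learning_rate=scheduler,  # an LRScheduler is accepted here
    parameters=model.parameters(),
    beta1=0.9,
    beta2=0.98,
    epsilon=1e-9)
```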
deepspeech/training/optimizer.py
```diff
@@ -20,6 +20,7 @@ from paddle.regularizer import L2Decay
 from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog
 from deepspeech.utils.dynamic_import import dynamic_import
+from deepspeech.utils.dynamic_import import filter_valid_args
 from deepspeech.utils.log import Log
 
 __all__ = ["OptimizerFactory"]
```
```diff
@@ -78,4 +79,6 @@ class OptimizerFactory():
             f"Optimizer: {module_class.__name__} {args['learning_rate']}")
+        args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
+        args = filter_valid_args(args)
         return module_class(**args)
```
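This is what lets `OptimizerFactory` accept the superset dict built in model.py: None-valued entries are stripped before `module_class(**args)`, so a key like `epsilon` only reaches optimizers it was actually set for. A self-contained illustration of the failure mode avoided:

```python
# filter_valid_args as the diff defines it, applied to a superset dict.
def filter_valid_args(args):
    return {key: val for key, val in args.items() if val is not None}

args = {"learning_rate": 0.001, "epsilon": 1e-9, "beta1": None, "beta2": None}
# Without filtering, beta1=None would be forwarded to the optimizer
# constructor and rejected there; after filtering only set keys remain.
print(filter_valid_args(args))  # {'learning_rate': 0.001, 'epsilon': 1e-09}
```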
deepspeech/utils/dynamic_import.py
```diff
@@ -20,7 +20,7 @@ from deepspeech.utils.log import Log
 logger = Log(__name__).getlog()
 
-__all__ = ["dynamic_import", "instance_class"]
+__all__ = ["dynamic_import", "instance_class", "filter_valid_args"]
 
 
 def dynamic_import(import_path, alias=dict()):
```
```diff
@@ -43,8 +43,14 @@ def dynamic_import(import_path, alias=dict()):
     return getattr(m, objname)
 
 
-def instance_class(module_class, args: Dict[Text, Any]):
+def filter_valid_args(args: Dict[Text, Any]):
     # filter out `val` which is None
     new_args = {key: val for key, val in args.items() if val is not None}
+    return new_args
+
+
+def instance_class(module_class, args: Dict[Text, Any]):
+    # filter out `val` which is None
+    new_args = filter_valid_args(args)
     logger.info(f"Instance: {module_class.__name__} {new_args}.")
     return module_class(**new_args)
```
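The refactor keeps `instance_class` behavior identical while exposing the None-filtering step for reuse by `OptimizerFactory`. A minimal standalone demo of the two helpers as the diff leaves them, using a hypothetical `Dummy` class for illustration:

```python
from typing import Any, Dict, Text


def filter_valid_args(args: Dict[Text, Any]):
    # filter out `val` which is None
    return {key: val for key, val in args.items() if val is not None}


def instance_class(module_class, args: Dict[Text, Any]):
    new_args = filter_valid_args(args)
    return module_class(**new_args)


class Dummy:  # hypothetical class, for illustration only
    def __init__(self, a, b=2):
        self.a, self.b = a, b


d = instance_class(Dummy, {"a": 1, "b": None})  # b=None dropped, default kept
print(d.a, d.b)  # prints: 1 2
```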