PaddlePaddle / DeepSpeech
Commit c0f7aac8
Authored Jun 30, 2021 by Haoxin Ma

revise conf/*.yaml

Parent: 08b6213b
Showing 13 changed files with 49 additions and 15 deletions:
deepspeech/utils/checkpoint.py                         +13 -15
examples/aishell/s0/conf/deepspeech2.yaml               +3  -0
examples/aishell/s1/conf/chunk_conformer.yaml           +3  -0
examples/aishell/s1/conf/conformer.yaml                 +3  -0
examples/librispeech/s0/conf/deepspeech2.yaml           +3  -0
examples/librispeech/s1/conf/chunk_confermer.yaml       +3  -0
examples/librispeech/s1/conf/chunk_transformer.yaml     +3  -0
examples/librispeech/s1/conf/conformer.yaml             +3  -0
examples/librispeech/s1/conf/transformer.yaml           +3  -0
examples/tiny/s1/conf/chunk_confermer.yaml              +3  -0
examples/tiny/s1/conf/chunk_transformer.yaml            +3  -0
examples/tiny/s1/conf/conformer.yaml                    +3  -0
examples/tiny/s1/conf/transformer.yaml                  +3  -0
deepspeech/utils/checkpoint.py

@@ -24,7 +24,6 @@ from paddle.optimizer import Optimizer
 from deepspeech.utils import mp_tools
 from deepspeech.utils.log import Log
-# import operator

 logger = Log(__name__).getlog()

@@ -38,7 +37,7 @@ class Checkpoint(object):
         self.kbest_n = kbest_n
         self.latest_n = latest_n
         self._save_all = (kbest_n == -1)

     def add_checkpoint(self,
                        checkpoint_dir,
                        tag_or_iteration,

@@ -64,10 +63,10 @@ class Checkpoint(object):
         self._save_checkpoint_record(checkpoint_dir, tag_or_iteration)

     def load_latest_parameters(self,
                                model,
                                optimizer=None,
                                checkpoint_dir=None,
                                checkpoint_path=None):
         """Load a last model checkpoint from disk.
         Args:
             model (Layer): model to load parameters.

@@ -80,14 +79,14 @@ class Checkpoint(object):
         Returns:
             configs (dict): epoch or step, lr and other meta info should be saved.
         """
-        return self._load_parameters(model, optimizer, checkpoint_dir, checkpoint_path,
-                                     "checkpoint_latest")
+        return self._load_parameters(model, optimizer, checkpoint_dir,
+                                     checkpoint_path, "checkpoint_latest")

     def load_best_parameters(self,
                              model,
                              optimizer=None,
                              checkpoint_dir=None,
                              checkpoint_path=None):
         """Load a last model checkpoint from disk.
         Args:
             model (Layer): model to load parameters.

@@ -100,8 +99,8 @@ class Checkpoint(object):
         Returns:
             configs (dict): epoch or step, lr and other meta info should be saved.
         """
-        return self._load_parameters(model, optimizer, checkpoint_dir, checkpoint_path,
-                                     "checkpoint_best")
+        return self._load_parameters(model, optimizer, checkpoint_dir,
+                                     checkpoint_path, "checkpoint_best")

     def _should_save_best(self, metric: float) -> bool:
         if not self._best_full():

@@ -248,7 +247,6 @@ class Checkpoint(object):
             configs = json.load(fin)
         return configs

     @mp_tools.rank_zero_only
     def _save_parameters(self,
                          checkpoint_dir: str,
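Taken together, the checkpoint.py hunks show the two knobs that the twelve YAML diffs below turn on: Checkpoint keeps the kbest_n best checkpoints plus the latest_n most recent ones, kbest_n == -1 (_save_all) disables pruning entirely, and load_latest_parameters / load_best_parameters restore from the "checkpoint_latest" / "checkpoint_best" records. The following self-contained sketch illustrates that retention policy; it is an illustration of the semantics only, not the code in deepspeech/utils/checkpoint.py.

def checkpoints_to_keep(records, kbest_n, latest_n):
    """records: (iteration, metric) pairs, oldest first; lower metric is better."""
    if kbest_n == -1:                        # _save_all: never prune anything
        return {it for it, _ in records}
    best = sorted(records, key=lambda r: r[1])[:kbest_n]
    latest = records[-latest_n:] if latest_n > 0 else []
    return {it for it, _ in best} | {it for it, _ in latest}

if __name__ == "__main__":
    history = [(1, 3.2), (2, 2.9), (3, 3.5), (4, 2.7), (5, 3.0)]
    print(sorted(checkpoints_to_keep(history, kbest_n=2, latest_n=1)))
    # -> [2, 4, 5]: iterations 4 and 2 score best, 5 is the most recent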
examples/aishell/s0/conf/deepspeech2.yaml

@@ -48,6 +48,9 @@ training:
   weight_decay: 1e-06
   global_grad_clip: 3.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
   batch_size: 128
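Every conf/*.yaml diff in this commit makes the same three-line addition to the training: section; the tiny smoke-test configs below use the smaller values kbest_n: 10 and latest_n: 1. Here is a minimal sketch of how a training script could consume the new block — plain PyYAML is used for illustration, and the commented-out Checkpoint wiring is an assumption based on the attributes visible in the checkpoint.py hunks above, not code from this commit.

import yaml

with open("examples/aishell/s0/conf/deepspeech2.yaml") as f:
    conf = yaml.safe_load(f)

ckpt = conf["training"].get("checkpoint", {})      # absent in older configs
kbest_n = ckpt.get("kbest_n", 50)                  # fall back to a default
latest_n = ckpt.get("latest_n", 5)

# Hypothetical wiring (constructor keywords assumed from the diff):
# from deepspeech.utils.checkpoint import Checkpoint
# checkpointer = Checkpoint(kbest_n=kbest_n, latest_n=latest_n)
print(kbest_n, latest_n)                           # 50 5 for this config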
examples/aishell/s1/conf/chunk_conformer.yaml

@@ -90,6 +90,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/aishell/s1/conf/conformer.yaml

@@ -88,6 +88,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/librispeech/s0/conf/deepspeech2.yaml

@@ -43,6 +43,9 @@ training:
   weight_decay: 1e-06
   global_grad_clip: 5.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
   batch_size: 128
examples/librispeech/s1/conf/chunk_confermer.yaml

@@ -91,6 +91,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/librispeech/s1/conf/chunk_transformer.yaml

@@ -84,6 +84,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/librispeech/s1/conf/conformer.yaml

@@ -87,6 +87,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/librispeech/s1/conf/transformer.yaml

@@ -82,6 +82,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 100
+  checkpoint:
+    kbest_n: 50
+    latest_n: 5

 decoding:
examples/tiny/s1/conf/chunk_confermer.yaml

@@ -91,6 +91,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 1
+  checkpoint:
+    kbest_n: 10
+    latest_n: 1

 decoding:
examples/tiny/s1/conf/chunk_transformer.yaml

@@ -84,6 +84,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 1
+  checkpoint:
+    kbest_n: 10
+    latest_n: 1

 decoding:
examples/tiny/s1/conf/conformer.yaml

@@ -87,6 +87,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 1
+  checkpoint:
+    kbest_n: 10
+    latest_n: 1

 decoding:
examples/tiny/s1/conf/transformer.yaml

@@ -84,6 +84,9 @@ training:
   warmup_steps: 25000
   lr_decay: 1.0
   log_interval: 1
+  checkpoint:
+    kbest_n: 10
+    latest_n: 1

 decoding: