PaddlePaddle / DeepSpeech
Unverified commit bbc442b8
Authored on Jul 01, 2022 by 小湉湉; committed by GitHub on Jul 01, 2022
Merge pull request #2107 from yt605155624/rm_more_log
[cli]rm extra log
Parents: 0d91b497 cf846f9e
Showing 3 changed files with 1 addition and 11 deletions (+1, -11)
paddlespeech/cli/executor.py (+1, -1)
paddlespeech/cli/tts/infer.py (+0, -6)
paddlespeech/t2s/models/fastspeech2/fastspeech2.py (+0, -4)
paddlespeech/cli/executor.py
...
@@ -217,7 +217,7 @@ class BaseExecutor(ABC):
             logging.getLogger(name) for name in logging.root.manager.loggerDict
         ]
         for l in loggers:
-            l.disabled = True
+            l.setLevel(logging.ERROR)
 
     def show_rtf(self, info: Dict[str, List[float]]):
         """
...
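This is the one functional change in the commit: instead of disabling every registered logger outright, BaseExecutor now raises each logger's threshold to ERROR, so errors from third-party libraries are still reported while their informational chatter stays silent. A minimal standalone sketch of that difference (the logger name below is hypothetical, not one used by PaddleSpeech):

```python
import logging

logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

# Hypothetical third-party logger, standing in for the entries that
# BaseExecutor collects from logging.root.manager.loggerDict.
noisy = logging.getLogger("some.noisy.dependency")

noisy.disabled = True            # old behaviour: the logger is switched off entirely
noisy.error("lost error")        # dropped, even though it is an error

noisy.disabled = False
noisy.setLevel(logging.ERROR)    # new behaviour: suppress chatter, keep errors
noisy.info("suppressed info")    # dropped, below the ERROR threshold
noisy.error("kept error")        # still emitted
```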
paddlespeech/cli/tts/infer.py
...
@@ -267,21 +267,18 @@ class TTSExecutor(BaseExecutor):
         with open(self.phones_dict, "r") as f:
             phn_id = [line.strip().split() for line in f.readlines()]
         vocab_size = len(phn_id)
-        print("vocab_size:", vocab_size)
 
         tone_size = None
         if self.tones_dict:
             with open(self.tones_dict, "r") as f:
                 tone_id = [line.strip().split() for line in f.readlines()]
             tone_size = len(tone_id)
-            print("tone_size:", tone_size)
 
         spk_num = None
         if self.speaker_dict:
             with open(self.speaker_dict, 'rt') as f:
                 spk_id = [line.strip().split() for line in f.readlines()]
             spk_num = len(spk_id)
-            print("spk_num:", spk_num)
 
         # frontend
         if lang == 'zh':
...
@@ -291,7 +288,6 @@ class TTSExecutor(BaseExecutor):
         elif lang == 'en':
             self.frontend = English(phone_vocab_path=self.phones_dict)
-        print("frontend done!")
 
         # acoustic model
         odim = self.am_config.n_mels
...
@@ -324,7 +320,6 @@ class TTSExecutor(BaseExecutor):
         am_normalizer = ZScore(am_mu, am_std)
         self.am_inference = am_inference_class(am_normalizer, am)
         self.am_inference.eval()
-        print("acoustic model done!")
 
         # vocoder
         # model: {model_name}_{dataset}
...
@@ -347,7 +342,6 @@ class TTSExecutor(BaseExecutor):
         voc_normalizer = ZScore(voc_mu, voc_std)
         self.voc_inference = voc_inference_class(voc_normalizer, voc)
         self.voc_inference.eval()
-        print("voc done!")
 
     def preprocess(self, input: Any, *args, **kwargs):
         """
...
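All six print calls removed from TTSExecutor were progress markers (the vocab_size / tone_size / spk_num values and the "... done!" messages); the surrounding dictionary-loading and model-construction logic is untouched. As a small illustration of that loading pattern, here is a self-contained sketch that builds a throwaway dictionary file (the file name and contents are made up, not a real phones_dict shipped with a model):

```python
from pathlib import Path

# Hypothetical dictionary file: one "<symbol> <id>" pair per line, mirroring
# the format that TTSExecutor reads from self.phones_dict.
dict_path = Path("phone_id_map.txt")
dict_path.write_text("sil 0\nAA 1\nAE 2\n")

# Same parsing pattern as in the diff context above.
with open(dict_path, "r") as f:
    phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)

assert phn_id[1] == ["AA", "1"]
assert vocab_size == 3
```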
paddlespeech/t2s/models/fastspeech2/fastspeech2.py
...
@@ -258,7 +258,6 @@ class FastSpeech2(nn.Layer):
             padding_idx=self.padding_idx)
 
         if encoder_type == "transformer":
-            print("encoder_type is transformer")
             self.encoder = TransformerEncoder(
                 idim=idim,
                 attention_dim=adim,
...
@@ -275,7 +274,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif encoder_type == "conformer":
-            print("encoder_type is conformer")
             self.encoder = ConformerEncoder(
                 idim=idim,
                 attention_dim=adim,
...
@@ -362,7 +360,6 @@ class FastSpeech2(nn.Layer):
         # NOTE: we use encoder as decoder
         # because fastspeech's decoder is the same as encoder
         if decoder_type == "transformer":
-            print("decoder_type is transformer")
             self.decoder = TransformerEncoder(
                 idim=0,
                 attention_dim=adim,
...
@@ -380,7 +377,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif decoder_type == "conformer":
-            print("decoder_type is conformer")
             self.decoder = ConformerEncoder(
                 idim=0,
                 attention_dim=adim,
...
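The four deleted prints only announced which encoder/decoder variant FastSpeech2 was instantiating. If that information still needs to be surfaced, one option (not what this commit does) would be to emit it through a module-level logger at DEBUG level. A minimal sketch with a simplified, hypothetical dispatch function standing in for the real constructor branches:

```python
import logging

logger = logging.getLogger(__name__)


def build_encoder(encoder_type: str) -> str:
    # Simplified stand-in for the encoder_type dispatch in FastSpeech2;
    # the real branches construct TransformerEncoder / ConformerEncoder
    # with many more arguments.
    if encoder_type == "transformer":
        logger.debug("encoder_type is transformer")  # silent unless DEBUG is enabled
        return "TransformerEncoder"
    elif encoder_type == "conformer":
        logger.debug("encoder_type is conformer")
        return "ConformerEncoder"
    raise ValueError(f"unsupported encoder_type: {encoder_type}")


logging.basicConfig(level=logging.WARNING)  # default: the debug lines stay quiet
assert build_encoder("conformer") == "ConformerEncoder"
```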