PaddlePaddle / DeepSpeech
Commit c7d9b115, authored Apr 21, 2022 by Hui Zhang

    format

Parent: caf72258

Showing 18 changed files with 910 additions and 823 deletions (+910 -823)
.flake8                                                    +2   -0
paddlespeech/cli/asr/infer.py                              +4   -2
paddlespeech/s2t/models/u2/u2.py                           +1   -1
paddlespeech/s2t/modules/ctc.py                            +1   -1
paddlespeech/server/README.md                              +1   -1
paddlespeech/server/README_cn.md                           +1   -1
paddlespeech/server/bin/paddlespeech_client.py             +1   -0
paddlespeech/server/engine/asr/online/ctc_search.py        +2   -0
paddlespeech/server/tests/asr/online/websocket_client.py   +2   -2
paddlespeech/t2s/exps/synthesize.py                        +1   -1
paddlespeech/vector/cluster/diarization.py                 +1   -1
speechx/examples/ngram/zh/local/text_to_lexicon.py         +6   -10
speechx/examples/text_lm/local/mmseg.py                    +325 -313
speechx/examples/wfst/README.md                            +1   -1
utils/DER.py                                               +1   -1
utils/compute-wer.py                                       +509 -455
utils/format_rsl.py                                        +46  -31
utils/fst/prepare_dict.py                                  +5   -2
.flake8 (view file @ c7d9b115)

@@ -12,6 +12,8 @@ exclude =
    .git,
    # python cache
    __pycache__,
    # third party
    utils/compute-wer.py,
    third_party/,
    # Provide a comma-separate list of glob patterns to include for checks.
    filename =
paddlespeech/cli/asr/infer.py (view file @ c7d9b115)

@@ -40,6 +40,7 @@ from paddlespeech.s2t.utils.utility import UpdateConfig
__all__ = ['ASRExecutor']


@cli_register(
    name='paddlespeech.asr', description='Speech to text infer command.')
class ASRExecutor(BaseExecutor):

@@ -278,7 +279,8 @@ class ASRExecutor(BaseExecutor):
            self._outputs["result"] = result_transcripts[0]
        elif "conformer" in model_type or "transformer" in model_type:
            logger.info(
                f"we will use the transformer like model : {model_type}")
            try:
                result_transcripts = self.model.decode(
                    audio,
paddlespeech/s2t/models/u2/u2.py (view file @ c7d9b115)

paddlespeech/s2t/modules/ctc.py (view file @ c7d9b115)

paddlespeech/server/README.md (view file @ c7d9b115)

paddlespeech/server/README_cn.md (view file @ c7d9b115)
paddlespeech/server/bin/paddlespeech_client.py (view file @ c7d9b115)

@@ -305,6 +305,7 @@ class ASRClientExecutor(BaseExecutor):
        return res['asr_results']


@cli_client_register(
    name='paddlespeech_client.cls', description='visit cls service')
class CLSClientExecutor(BaseExecutor):
paddlespeech/server/engine/asr/online/ctc_search.py (view file @ c7d9b115)

@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict

import paddle

from paddlespeech.cli.log import logger
from paddlespeech.s2t.utils.utility import log_add
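The log_add imported here is what the CTC prefix beam search uses to combine path scores in log space. Its semantics are the standard log-sum-exp trick; a minimal sketch of that behavior (the exact implementation in paddlespeech.s2t.utils.utility may differ in detail):

import math
from typing import List


def log_add_sketch(args: List[float]) -> float:
    """Stable log(sum(exp(a) for a in args)); -inf entries contribute nothing."""
    if all(a == -float('inf') for a in args):
        return -float('inf')
    a_max = max(args)
    return a_max + math.log(sum(math.exp(a - a_max) for a in args))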
paddlespeech/server/tests/asr/online/websocket_client.py (view file @ c7d9b115)

@@ -36,7 +36,7 @@ class ASRAudioHandler:
        x_len = len(samples)
        chunk_size = 85 * 16  #80ms, sample_rate = 16kHz
        if x_len % chunk_size != 0:
            padding_len_x = chunk_size - x_len % chunk_size
        else:
            padding_len_x = 0
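A quick check of the chunk arithmetic above (not part of the diff): at 16 kHz there are 16 samples per millisecond, so 85 * 16 samples is 85 ms of audio per chunk, and the padding branch rounds the input up to a whole number of chunks.

sample_rate = 16000                        # 16 kHz, as the inline comment assumes
chunk_size = 85 * 16                       # 1360 samples
print(chunk_size / (sample_rate / 1000))   # 85.0 -> milliseconds of audio per chunk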
paddlespeech/t2s/exps/synthesize.py (view file @ c7d9b115)

paddlespeech/vector/cluster/diarization.py (view file @ c7d9b115)

@@ -20,11 +20,11 @@ A few sklearn functions are modified in this script as per requirement.
import argparse
import copy
import warnings

import numpy as np
import scipy
import sklearn
from distutils.util import strtobool
from scipy import linalg
from scipy import sparse
from scipy.sparse.csgraph import connected_components
speechx/examples/ngram/zh/local/text_to_lexicon.py (view file @ c7d9b115)

@@ -2,6 +2,7 @@
import argparse
from collections import Counter


def main(args):
    counter = Counter()
    with open(args.text, 'r') as fin, open(args.lexicon, 'w') as fout:

@@ -20,21 +21,16 @@ def main(args):
            fout.write(f"{word}\t{val}\n")
        fout.flush()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='text(line:utt1 中国 人) to lexicon(line:中国 中 国).')
    parser.add_argument(
        '--has_key', default=True, help='text path, with utt or not')
    parser.add_argument(
        '--text', required=True, help='text path. line: utt1 中国 人 or 中国 人')
    parser.add_argument(
        '--lexicon', required=True, help='lexicon path. line:中国 中 国')
    args = parser.parse_args()
    print(args)
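For orientation: each vocabulary word becomes one lexicon line mapping the word to its characters, as the argparse help strings describe. A sketch of that per-word transformation (word_to_entry is a hypothetical helper, not from this file):

def word_to_entry(word: str) -> str:
    """One lexicon line: the word followed by its characters."""
    return word + ' ' + ' '.join(word)


print(word_to_entry('中国'))  # -> 中国 中 国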
speechx/examples/text_lm/local/mmseg.py (view file @ c7d9b115)

#!/usr/bin/env python3
# modify from https://sites.google.com/site/homepageoffuyanwei/Home/remarksandexcellentdiscussion/page-2


class Word:
    def __init__(self, text='', freq=0):
        self.text = text
        self.freq = freq
        self.length = len(text)


class Chunk:
    def __init__(self, w1, w2=None, w3=None):
        self.words = []
        self.words.append(w1)
        if w2:

@@ -44,8 +45,8 @@ class Chunk:
            sum += word.freq
        return sum


class ComplexCompare:
    def takeHightest(self, chunks, comparator):
        i = 1
        for j in range(1, len(chunks)):
@@ -59,23 +60,27 @@ class ComplexCompare:
    #以下四个函数是mmseg算法的四种过滤原则,核心算法
    # (the four filtering rules of the mmseg algorithm; the core of the algorithm)
    def mmFilter(self, chunks):
        def comparator(a, b):
            return a.totalWordLength() - b.totalWordLength()

        return self.takeHightest(chunks, comparator)

    def lawlFilter(self, chunks):
        def comparator(a, b):
            return a.averageWordLength() - b.averageWordLength()

        return self.takeHightest(chunks, comparator)

    def svmlFilter(self, chunks):
        def comparator(a, b):
            return b.standardDeviation() - a.standardDeviation()

        return self.takeHightest(chunks, comparator)

    def logFreqFilter(self, chunks):
        def comparator(a, b):
            return a.wordFrequency() - b.wordFrequency()

        return self.takeHightest(chunks, comparator)
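These four filters run as a cascade inside the analyzer: each keeps only the chunks that maximize its criterion via takeHightest, and a later rule fires only while more than one candidate survives. A minimal sketch of that flow, assuming this module is importable, with hypothetical candidate chunks and made-up frequencies:

cc = ComplexCompare()
chunks = [
    Chunk(Word('研究', 20), Word('生命', 27), Word('起源', 4)),
    Chunk(Word('研究生', 3), Word('命', 5), Word('起源', 4)),
]
for filt in (cc.mmFilter, cc.lawlFilter, cc.svmlFilter, cc.logFreqFilter):
    if len(chunks) > 1:
        chunks = filt(chunks)
best = chunks[0]  # the surviving segmentation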
@@ -83,6 +88,7 @@ class ComplexCompare:
dictWord = {}
maxWordLength = 0


def loadDictChars(filepath):
    global maxWordLength
    fsock = open(filepath)

@@ -90,18 +96,22 @@ def loadDictChars(filepath):
        freq, word = line.split()
        word = word.strip()
        dictWord[word] = (len(word), int(freq))
        maxWordLength = len(word) if maxWordLength < len(word) else maxWordLength
    fsock.close()


def loadDictWords(filepath):
    global maxWordLength
    fsock = open(filepath)
    for line in fsock.readlines():
        word = line.strip()
        dictWord[word] = (len(word), 0)
        maxWordLength = len(word) if maxWordLength < len(word) else maxWordLength
    fsock.close()


#判断该词word是否在字典dictWord中
# (check whether the word is in the dictionary dictWord)
def getDictWord(word):
    result = dictWord.get(word)
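The two loaders imply the on-disk dictionary formats. A tiny self-contained check (file contents made up for illustration):

with open('chars.dic', 'w') as f:
    f.write('1293 的\n87 中\n')    # loadDictChars expects "<freq> <token>" per line
with open('words.dic', 'w') as f:
    f.write('中国\n起源\n')        # loadDictWords expects one word per line
loadDictChars('chars.dic')
loadDictWords('words.dic')
print(dictWord['中国'])            # (2, 0): (length, freq); words.dic entries get freq 0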
@@ -109,14 +119,15 @@ def getDictWord(word):
        return Word(word, result[1])
    return None


#开始加载字典
# (load the dictionaries)
def run():
    from os.path import join, dirname
    loadDictChars(join(dirname(__file__), 'data', 'chars.dic'))
    loadDictWords(join(dirname(__file__), 'data', 'words.dic'))


class Analysis:
    def __init__(self, text):
        self.text = text
        self.cacheSize = 3

@@ -134,11 +145,10 @@ class Analysis:
        if not dictWord:
            run()

    def __iter__(self):
        while True:
            token = self.getNextToken()
            if token is None:
                raise StopIteration
            yield token

@@ -146,7 +156,7 @@ class Analysis:
        return self.text[self.pos]

    #判断该字符是否是中文字符(不包括中文标点)
    # (is this a Chinese character, excluding Chinese punctuation)
    def isChineseChar(self, charater):
        return 0x4e00 <= ord(charater) < 0x9fa6

    #判断是否是ASCII码
    # (is this an ASCII character)

@@ -163,8 +173,8 @@ class Analysis:
        while self.pos < self.textLength:
            if self.isChineseChar(self.getNextChar()):
                token = self.getChineseWords()
            else:
                token = self.getASCIIWords() + '/'
            if len(token) > 0:
                return token
        return None

@@ -211,7 +221,7 @@ class Analysis:
            chunks = self.complexCompare.svmlFilter(chunks)
        if len(chunks) > 1:
            chunks = self.complexCompare.logFreqFilter(chunks)
        if len(chunks) == 0:
            return ''

        #最后只有一种切割方法
        # (only one segmentation remains at this point)

@@ -242,13 +252,13 @@ class Analysis:
                    for word3 in words3:
                        # print(word3.length, word3.text)
                        if word3.length == -1:
                            chunk = Chunk(word1, word2)
                            # print("Ture")
                        else:
                            chunk = Chunk(word1, word2, word3)
                        chunks.append(chunk)
                elif self.pos == self.textLength:
                    chunks.append(Chunk(word1, word2))
                self.pos -= len(word2.text)
        elif self.pos == self.textLength:
            chunks.append(Chunk(word1))

@@ -268,7 +278,7 @@ class Analysis:
        words = []
        index = 0
        while self.pos < self.textLength:
            if index >= maxWordLength:
                break
            if not self.isChineseChar(self.getNextChar()):
                break

@@ -288,18 +298,18 @@ class Analysis:
                word.text = 'X'
                words.append(word)

        self.cache[self.cacheIndex] = (self.pos, words)
        self.cacheIndex += 1
        if self.cacheIndex >= self.cacheSize:
            self.cacheIndex = 0
        return words


if __name__ == "__main__":

    def cuttest(text):
        #cut = Analysis(text)
        tmp = ""
        try:
            for word in iter(Analysis(text)):
                tmp += word

@@ -375,6 +385,8 @@ if __name__=="__main__":
    cuttest(u"好人使用了它就可以解决一些问题")
    cuttest(u"是因为和国家")
    cuttest(u"老年搜索还支持")
    cuttest(
        u"干脆就把那部蒙人的闲法给废了拉倒!RT @laoshipukong : 27日,全国人大常委会第三次审议侵权责任法草案,删除了有关医疗损害责任“举证倒置”的规定。在医患纠纷中本已处于弱势地位的消费者由此将陷入万劫不复的境地。 ")
    cuttest("2022年12月30日是星期几?")
    cuttest("二零二二年十二月三十日是星期几?")
speechx/examples/wfst/README.md (view file @ c7d9b115)
utils/DER.py (view file @ c7d9b115)

@@ -26,9 +26,9 @@ import argparse
import os
import re
import subprocess

import numpy as np
from distutils.util import strtobool

FILE_IDS = re.compile(r"(?<=Speaker Diarization for).+(?=\*\*\*)")
SCORED_SPEAKER_TIME = re.compile(r"(?<=SCORED SPEAKER TIME =)[\d.]+")
utils/compute-wer.py (view file @ c7d9b115)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# CopyRight WeNet Apache-2.0 License
import codecs
import re
import sys
import unicodedata

remove_tag = True
spacelist = [' ', '\t', '\r', '\n']
puncts = [
    '!', ',', '?', '、', '。', '!', ',', ';', '?', ':', '「', '」', '︰', '『', '』',
    '《', '》'
]


def characterize(string):
    res = []
    i = 0
    while i < len(string):
@@ -31,10 +34,10 @@ def characterize(string):
        # some input looks like: <unk><noise>, we want to separate it to two words.
        sep = ' '
        if char == '<': sep = '>'
        j = i + 1
        while j < len(string):
            c = string[j]
            if ord(c) >= 128 or (c in spacelist) or (c == sep):
                break
            j += 1
        if j < len(string) and string[j] == '>':

@@ -43,10 +46,12 @@ def characterize(string):
            i = j
    return res


def stripoff_tags(x):
    if not x:
        return ''
    chars = []
    i = 0
    T = len(x)
    while i < T:
        if x[i] == '<':
            while i < T and x[i] != '>':
@@ -78,8 +83,9 @@ def normalize(sentence, ignore_words, cs, split=None):
            new_sentence.append(x)
    return new_sentence


class Calculator:
    def __init__(self):
        self.data = {}
        self.space = []
        self.cost = {}

@@ -87,66 +93,87 @@ class Calculator:
        self.cost['sub'] = 1
        self.cost['del'] = 1
        self.cost['ins'] = 1

    def calculate(self, lab, rec):
        # Initialization
        lab.insert(0, '')
        rec.insert(0, '')
        while len(self.space) < len(lab):
            self.space.append([])
        for row in self.space:
            for element in row:
                element['dist'] = 0
                element['error'] = 'non'
            while len(row) < len(rec):
                row.append({'dist': 0, 'error': 'non'})
        for i in range(len(lab)):
            self.space[i][0]['dist'] = i
            self.space[i][0]['error'] = 'del'
        for j in range(len(rec)):
            self.space[0][j]['dist'] = j
            self.space[0][j]['error'] = 'ins'
        self.space[0][0]['error'] = 'non'
        for token in lab:
            if token not in self.data and len(token) > 0:
                self.data[token] = {
                    'all': 0, 'cor': 0, 'sub': 0, 'ins': 0, 'del': 0
                }
        for token in rec:
            if token not in self.data and len(token) > 0:
                self.data[token] = {
                    'all': 0, 'cor': 0, 'sub': 0, 'ins': 0, 'del': 0
                }
        # Computing edit distance
        for i, lab_token in enumerate(lab):
            for j, rec_token in enumerate(rec):
                if i == 0 or j == 0:
                    continue
                min_dist = sys.maxsize
                min_error = 'none'
                dist = self.space[i - 1][j]['dist'] + self.cost['del']
                error = 'del'
                if dist < min_dist:
                    min_dist = dist
                    min_error = error
                dist = self.space[i][j - 1]['dist'] + self.cost['ins']
                error = 'ins'
                if dist < min_dist:
                    min_dist = dist
                    min_error = error
                if lab_token == rec_token:
                    dist = self.space[i - 1][j - 1]['dist'] + self.cost['cor']
                    error = 'cor'
                else:
                    dist = self.space[i - 1][j - 1]['dist'] + self.cost['sub']
                    error = 'sub'
                if dist < min_dist:
                    min_dist = dist
                    min_error = error
                self.space[i][j]['dist'] = min_dist
                self.space[i][j]['error'] = min_error
        # Tracing back
        result = {
            'lab': [], 'rec': [], 'all': 0, 'cor': 0, 'sub': 0, 'ins': 0,
            'del': 0
        }
        i = len(lab) - 1
        j = len(rec) - 1
        while True:
            if self.space[i][j]['error'] == 'cor':  # correct
                if len(lab[i]) > 0:
                    self.data[lab[i]]['all'] = self.data[lab[i]]['all'] + 1
                    self.data[lab[i]]['cor'] = self.data[lab[i]]['cor'] + 1
                    result['all'] = result['all'] + 1
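calculate() is a textbook Levenshtein alignment with unit costs, and WER then follows as (S + D + I) / N over the N reference tokens. A worked example on hypothetical transcripts:

calc = Calculator()
result = calc.calculate(['the', 'cat', 'sat'], ['the', 'cat', 'sit', 'on'])
# N=3 reference tokens: C=2 correct, S=1 ('sat' -> 'sit'), D=0, I=1 ('on')
wer = (result['sub'] + result['del'] + result['ins']) * 100.0 / result['all']
print('%.2f' % wer)  # 66.67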
@@ -155,8 +182,8 @@ class Calculator:
                result['rec'].insert(0, rec[j])
                i = i - 1
                j = j - 1
            elif self.space[i][j]['error'] == 'sub':  # substitution
                if len(lab[i]) > 0:
                    self.data[lab[i]]['all'] = self.data[lab[i]]['all'] + 1
                    self.data[lab[i]]['sub'] = self.data[lab[i]]['sub'] + 1
                    result['all'] = result['all'] + 1

@@ -165,8 +192,8 @@ class Calculator:
                result['rec'].insert(0, rec[j])
                i = i - 1
                j = j - 1
            elif self.space[i][j]['error'] == 'del':  # deletion
                if len(lab[i]) > 0:
                    self.data[lab[i]]['all'] = self.data[lab[i]]['all'] + 1
                    self.data[lab[i]]['del'] = self.data[lab[i]]['del'] + 1
                    result['all'] = result['all'] + 1
@@ -174,57 +201,64 @@ class Calculator:
                result['lab'].insert(0, lab[i])
                result['rec'].insert(0, "")
                i = i - 1
            elif self.space[i][j]['error'] == 'ins':  # insertion
                if len(rec[j]) > 0:
                    self.data[rec[j]]['ins'] = self.data[rec[j]]['ins'] + 1
                    result['ins'] = result['ins'] + 1
                result['lab'].insert(0, "")
                result['rec'].insert(0, rec[j])
                j = j - 1
            elif self.space[i][j]['error'] == 'non':  # starting point
                break
            else:  # shouldn't reach here
                print('this should not happen , i = {i} , j = {j} , error = {error}'.
                      format(i=i, j=j, error=self.space[i][j]['error']))
        return result

    def overall(self):
        result = {'all': 0, 'cor': 0, 'sub': 0, 'ins': 0, 'del': 0}
        for token in self.data:
            result['all'] = result['all'] + self.data[token]['all']
            result['cor'] = result['cor'] + self.data[token]['cor']
            result['sub'] = result['sub'] + self.data[token]['sub']
            result['ins'] = result['ins'] + self.data[token]['ins']
            result['del'] = result['del'] + self.data[token]['del']
        return result

    def cluster(self, data):
        result = {'all': 0, 'cor': 0, 'sub': 0, 'ins': 0, 'del': 0}
        for token in data:
            if token in self.data:
                result['all'] = result['all'] + self.data[token]['all']
                result['cor'] = result['cor'] + self.data[token]['cor']
                result['sub'] = result['sub'] + self.data[token]['sub']
                result['ins'] = result['ins'] + self.data[token]['ins']
                result['del'] = result['del'] + self.data[token]['del']
        return result

    def keys(self):
        return list(self.data.keys())


def width(string):
    return sum(1 + (unicodedata.east_asian_width(c) in "AFW") for c in string)


def default_cluster(word):
    unicode_names = [unicodedata.name(char) for char in word]
    for i in reversed(range(len(unicode_names))):
        if unicode_names[i].startswith('DIGIT'):  # 1
            unicode_names[i] = 'Number'  # 'DIGIT'
        elif (unicode_names[i].startswith('CJK UNIFIED IDEOGRAPH') or
              unicode_names[i].startswith('CJK COMPATIBILITY IDEOGRAPH')):
            # 明 / 郎
            unicode_names[i] = 'Mandarin'  # 'CJK IDEOGRAPH'
        elif (unicode_names[i].startswith('LATIN CAPITAL LETTER') or
              unicode_names[i].startswith('LATIN SMALL LETTER')):
            # A / a
            unicode_names[i] = 'English'  # 'LATIN LETTER'
        elif unicode_names[i].startswith('HIRAGANA LETTER'):  # は こ め
            unicode_names[i] = 'Japanese'  # 'GANA LETTER'
        elif (unicode_names[i].startswith('AMPERSAND') or
              unicode_names[i].startswith('APOSTROPHE') or

@@ -236,34 +270,40 @@ def default_cluster(word):
              unicode_names[i].startswith('LOW LINE') or
              unicode_names[i].startswith('NUMBER SIGN') or
              unicode_names[i].startswith('PLUS SIGN') or
              unicode_names[i].startswith('SEMICOLON')):
            # & / ' / @ / ℃ / = / . / - / _ / # / + / ;
            del unicode_names[i]
        else:
            return 'Other'
    if len(unicode_names) == 0:
        return 'Other'
    if len(unicode_names) == 1:
        return unicode_names[0]
    for i in range(len(unicode_names) - 1):
        if unicode_names[i] != unicode_names[i + 1]:
            return 'Other'
    return unicode_names[0]


def usage():
    print(
        "compute-wer.py : compute word error rate (WER) and align recognition results and references."
    )
    print(
        "         usage : python compute-wer.py [--cs={0,1}] [--cluster=foo] [--ig=ignore_file] [--char={0,1}] [--v={0,1}] [--padding-symbol={space,underline}] test.ref test.hyp > test.wer"
    )


if __name__ == '__main__':
    if len(sys.argv) == 1:
        usage()
        sys.exit(0)
    calculator = Calculator()
    cluster_file = ''
    ignore_words = set()
    tochar = False
    verbose = 1
    padding_symbol = ' '
    case_sensitive = False
    max_words_per_line = sys.maxsize
    split = None
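default_cluster() buckets a token by the Unicode names of its characters, and width() counts East Asian wide and fullwidth characters as two columns when aligning output. The underlying stdlib calls behave like this:

import unicodedata
print(unicodedata.name('明'))  # CJK UNIFIED IDEOGRAPH-660E -> clustered as 'Mandarin'
print(unicodedata.name('a'))   # LATIN SMALL LETTER A       -> 'English'
print(unicodedata.name('7'))   # DIGIT SEVEN                -> 'Number'
print(unicodedata.east_asian_width('明'))  # 'W' (wide), so width('明a') == 3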
@@ -322,9 +362,9 @@ if __name__ == '__main__':
        if sys.argv[1].startswith(a):
            b = sys.argv[1][len(a):].lower()
            del sys.argv[1]
            verbose = 0
            try:
                verbose = int(b)
            except:
                if b == 'true' or b != '0':
                    verbose = 1

@@ -334,9 +374,9 @@ if __name__ == '__main__':
            b = sys.argv[1][len(a):].lower()
            del sys.argv[1]
            if b == 'space':
                padding_symbol = ' '
            elif b == 'underline':
                padding_symbol = '_'
            continue
        if True or sys.argv[1].startswith('-'):
            #ignore invalid switch

@@ -344,7 +384,7 @@ if __name__ == '__main__':
            continue

    if not case_sensitive:
        ig = set([w.upper() for w in ignore_words])
        ignore_words = ig

    default_clusters = {}

@@ -368,17 +408,18 @@ if __name__ == '__main__':
        if tochar:
            array = characterize(line)
        else:
            array = line.strip().split()
        if len(array) == 0:
            continue
        fid = array[0]
        rec_set[fid] = normalize(array[1:], ignore_words, case_sensitive, split)

    # compute error rate on the interaction of reference file and hyp file
    for line in open(ref_file, 'r', encoding='utf-8'):
        if tochar:
            array = characterize(line)
        else:
            array = line.rstrip('\n').split()
        if len(array) == 0:
            continue
        fid = array[0]
        if fid not in rec_set:
            continue
...
@@ -387,105 +428,116 @@ if __name__ == '__main__':
if
verbose
:
print
(
'
\n
utt: %s'
%
fid
)
for
word
in
rec
+
lab
:
if
word
not
in
default_words
:
for
word
in
rec
+
lab
:
if
word
not
in
default_words
:
default_cluster_name
=
default_cluster
(
word
)
if
default_cluster_name
not
in
default_clusters
:
if
default_cluster_name
not
in
default_clusters
:
default_clusters
[
default_cluster_name
]
=
{}
if
word
not
in
default_clusters
[
default_cluster_name
]
:
if
word
not
in
default_clusters
[
default_cluster_name
]
:
default_clusters
[
default_cluster_name
][
word
]
=
1
default_words
[
word
]
=
default_cluster_name
result
=
calculator
.
calculate
(
lab
,
rec
)
if
verbose
:
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
wer
=
0.0
print
(
'WER: %4.2f %%'
%
wer
,
end
=
' '
)
print
(
'WER: %4.2f %%'
%
wer
,
end
=
' '
)
print
(
'N=%d C=%d S=%d D=%d I=%d'
%
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
space
=
{}
space
[
'lab'
]
=
[]
space
[
'rec'
]
=
[]
for
idx
in
range
(
len
(
result
[
'lab'
]))
:
for
idx
in
range
(
len
(
result
[
'lab'
]))
:
len_lab
=
width
(
result
[
'lab'
][
idx
])
len_rec
=
width
(
result
[
'rec'
][
idx
])
length
=
max
(
len_lab
,
len_rec
)
space
[
'lab'
].
append
(
length
-
len_lab
)
space
[
'rec'
].
append
(
length
-
len_rec
)
space
[
'lab'
].
append
(
length
-
len_lab
)
space
[
'rec'
].
append
(
length
-
len_rec
)
upper_lab
=
len
(
result
[
'lab'
])
upper_rec
=
len
(
result
[
'rec'
])
lab1
,
rec1
=
0
,
0
while
lab1
<
upper_lab
or
rec1
<
upper_rec
:
if
verbose
>
1
:
print
(
'lab(%s):'
%
fid
.
encode
(
'utf-8'
),
end
=
' '
)
print
(
'lab(%s):'
%
fid
.
encode
(
'utf-8'
),
end
=
' '
)
else
:
print
(
'lab:'
,
end
=
' '
)
print
(
'lab:'
,
end
=
' '
)
lab2
=
min
(
upper_lab
,
lab1
+
max_words_per_line
)
for
idx
in
range
(
lab1
,
lab2
):
token
=
result
[
'lab'
][
idx
]
print
(
'{token}'
.
format
(
token
=
token
),
end
=
''
)
for
n
in
range
(
space
[
'lab'
][
idx
])
:
print
(
padding_symbol
,
end
=
''
)
print
(
' '
,
end
=
''
)
print
(
'{token}'
.
format
(
token
=
token
),
end
=
''
)
for
n
in
range
(
space
[
'lab'
][
idx
])
:
print
(
padding_symbol
,
end
=
''
)
print
(
' '
,
end
=
''
)
print
()
if
verbose
>
1
:
print
(
'rec(%s):'
%
fid
.
encode
(
'utf-8'
),
end
=
' '
)
print
(
'rec(%s):'
%
fid
.
encode
(
'utf-8'
),
end
=
' '
)
else
:
print
(
'rec:'
,
end
=
' '
)
print
(
'rec:'
,
end
=
' '
)
rec2
=
min
(
upper_rec
,
rec1
+
max_words_per_line
)
for
idx
in
range
(
rec1
,
rec2
):
token
=
result
[
'rec'
][
idx
]
print
(
'{token}'
.
format
(
token
=
token
),
end
=
''
)
for
n
in
range
(
space
[
'rec'
][
idx
])
:
print
(
padding_symbol
,
end
=
''
)
print
(
' '
,
end
=
''
)
print
(
'{token}'
.
format
(
token
=
token
),
end
=
''
)
for
n
in
range
(
space
[
'rec'
][
idx
])
:
print
(
padding_symbol
,
end
=
''
)
print
(
' '
,
end
=
''
)
print
(
'
\n
'
,
end
=
'
\n
'
)
lab1
=
lab2
rec1
=
rec2
if
verbose
:
print
(
'==========================================================================='
)
print
(
'==========================================================================='
)
print
()
result
=
calculator
.
overall
()
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
wer
=
0.0
print
(
'Overall -> %4.2f %%'
%
wer
,
end
=
' '
)
print
(
'Overall -> %4.2f %%'
%
wer
,
end
=
' '
)
print
(
'N=%d C=%d S=%d D=%d I=%d'
%
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
if
not
verbose
:
print
()
if
verbose
:
for
cluster_id
in
default_clusters
:
result
=
calculator
.
cluster
([
k
for
k
in
default_clusters
[
cluster_id
]
])
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
for
cluster_id
in
default_clusters
:
result
=
calculator
.
cluster
(
[
k
for
k
in
default_clusters
[
cluster_id
]])
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
wer
=
0.0
print
(
'%s -> %4.2f %%'
%
(
cluster_id
,
wer
),
end
=
' '
)
print
(
'%s -> %4.2f %%'
%
(
cluster_id
,
wer
),
end
=
' '
)
print
(
'N=%d C=%d S=%d D=%d I=%d'
%
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
if
len
(
cluster_file
)
>
0
:
# compute separated WERs for word clusters
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
if
len
(
cluster_file
)
>
0
:
# compute separated WERs for word clusters
cluster_id
=
''
cluster
=
[]
for
line
in
open
(
cluster_file
,
'r'
,
encoding
=
'utf-8'
)
:
for
token
in
line
.
decode
(
'utf-8'
).
rstrip
(
'
\n
'
).
split
()
:
for
line
in
open
(
cluster_file
,
'r'
,
encoding
=
'utf-8'
)
:
for
token
in
line
.
decode
(
'utf-8'
).
rstrip
(
'
\n
'
).
split
()
:
# end of cluster reached, like </Keyword>
if
token
[
0
:
2
]
==
'</'
and
token
[
len
(
token
)
-
1
]
==
'>'
and
\
token
.
lstrip
(
'</'
).
rstrip
(
'>'
)
==
cluster_id
:
result
=
calculator
.
cluster
(
cluster
)
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
if
result
[
'all'
]
!=
0
:
wer
=
float
(
result
[
'ins'
]
+
result
[
'sub'
]
+
result
[
'del'
])
*
100.0
/
result
[
'all'
]
else
:
wer
=
0.0
print
(
'%s -> %4.2f %%'
%
(
cluster_id
,
wer
),
end
=
' '
)
print
(
'%s -> %4.2f %%'
%
(
cluster_id
,
wer
),
end
=
' '
)
print
(
'N=%d C=%d S=%d D=%d I=%d'
%
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
(
result
[
'all'
],
result
[
'cor'
],
result
[
'sub'
],
result
[
'del'
],
result
[
'ins'
]))
cluster_id
=
''
cluster
=
[]
# begin of cluster reached, like <Keyword>
...
...
@@ -494,7 +546,9 @@ if __name__ == '__main__':
cluster_id
=
token
.
lstrip
(
'<'
).
rstrip
(
'>'
)
cluster
=
[]
# general terms, like WEATHER / CAR / ...
else
:
else
:
cluster
.
append
(
token
)
print
()
print
(
'==========================================================================='
)
\ No newline at end of file
print
(
'==========================================================================='
)
utils/format_rsl.py (view file @ c7d9b115)

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

import jsonlines


def trans_hyp(origin_hyp, trans_hyp=None, trans_hyp_sclite=None):
    """
    Args:
        origin_hyp: The input json file which contains the model output

@@ -24,12 +34,11 @@ def trans_hyp(origin_hyp,
    if trans_hyp_sclite is not None:
        with open(trans_hyp_sclite, "w+") as f:
            for key in input_dict.keys():
                line = input_dict[key] + "(" + key + ".wav" + ")" + "\n"
                f.write(line)


def trans_ref(origin_ref, trans_ref=None, trans_ref_sclite=None):
    """
    Args:
        origin_hyp: The input json file which contains the model output

@@ -49,42 +58,48 @@ def trans_ref(origin_ref,
    if trans_ref_sclite is not None:
        with open(trans_ref_sclite, "w") as f:
            for key in input_dict.keys():
                line = input_dict[key] + "(" + key + ".wav" + ")" + "\n"
                f.write(line)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog='format hyp file for compute CER/WER', add_help=True)
    parser.add_argument(
        '--origin_hyp', type=str, default=None, help='origin hyp file')
    parser.add_argument(
        '--trans_hyp',
        type=str,
        default=None,
        help='hyp file for caculating CER/WER')
    parser.add_argument(
        '--trans_hyp_sclite',
        type=str,
        default=None,
        help='hyp file for caculating CER/WER by sclite')
    parser.add_argument(
        '--origin_ref', type=str, default=None, help='origin ref file')
    parser.add_argument(
        '--trans_ref',
        type=str,
        default=None,
        help='ref file for caculating CER/WER')
    parser.add_argument(
        '--trans_ref_sclite',
        type=str,
        default=None,
        help='ref file for caculating CER/WER by sclite')
    parser_args = parser.parse_args()

    if parser_args.origin_hyp is not None:
        trans_hyp(
            origin_hyp=parser_args.origin_hyp,
            trans_hyp=parser_args.trans_hyp,
            trans_hyp_sclite=parser_args.trans_hyp_sclite, )

    if parser_args.origin_ref is not None:
        trans_ref(
            origin_ref=parser_args.origin_ref,
            trans_ref=parser_args.trans_ref,
            trans_ref_sclite=parser_args.trans_ref_sclite, )
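Both writers emit sclite's trn convention: the transcript followed by the utterance key in parentheses. For a hypothetical key and transcript, the line built above looks like this:

key, text = 'utt1', '今天 天气 很 好'          # made-up values from the input json
line = text + "(" + key + ".wav" + ")" + "\n"
print(line, end='')                            # 今天 天气 很 好(utt1.wav)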
utils/fst/prepare_dict.py (view file @ c7d9b115)

@@ -82,7 +82,10 @@ def main(args):
            lexicon_table.add(word)
            out_n += 1

    print(f"Filter lexicon by unit table: filter out {in_n - out_n}, {out_n}/{in_n}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(