Commit a861e56e authored by 小湉湉, committed by root

rm space for pure Chinese

Parent dad1cbbc
@@ -36,7 +36,7 @@ def evaluate(args, fastspeech2_config, pwg_config):
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     with open(args.phones_dict, "r") as f:
......
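Every synthesize-script hunk in this commit is the same one-line change: the utterance text used to be rebuilt with `",".join(items[1:])`, which re-inserts a comma at every whitespace split, while for pure Chinese input that whitespace carries no meaning. A minimal sketch of the affected read loop, assuming the `<utt_id> <text...>` line format visible in the diff and a stand-in file name for `args.text`:

```python
# Sketch only: "sentences.txt" stands in for args.text; each line is
# "<utt_id> <text...>", as in the diff context above.
sentences = []
with open("sentences.txt", "rt") as f:
    for line in f:
        items = line.strip().split()
        utt_id = items[0]
        # Before: ",".join(items[1:]) re-inserted a comma wherever the raw
        # text contained whitespace; now the pieces are concatenated directly.
        sentence = "".join(items[1:])
        sentences.append((utt_id, sentence))
```

With this change a line such as `001 你好 世界` yields the sentence `你好世界` instead of `你好,世界`.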
@@ -84,7 +84,7 @@ def main():
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     for utt_id, sentence in sentences:
......
@@ -39,7 +39,7 @@ def evaluate(args, fastspeech2_config, pwg_config):
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     with open(args.phones_dict, "r") as f:
......
@@ -42,7 +42,7 @@ def evaluate(args, fastspeech2_config, pwg_config):
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     with open(args.phones_dict, "r") as f:
......
@@ -42,7 +42,7 @@ def evaluate(args, fastspeech2_config, melgan_config):
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     with open(args.phones_dict, "r") as f:
......
@@ -89,7 +89,7 @@ def main():
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
            sentences.append((utt_id, sentence))
     for utt_id, sentence in sentences:
......
@@ -42,7 +42,7 @@ def evaluate(args, speedyspeech_config, pwg_config):
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
-            sentence = ",".join(items[1:])
+            sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
     with open(args.phones_dict, "r") as f:
......
@@ -129,6 +129,8 @@ class Frontend():
             # we discriminate i, ii and iii
             if c and c not in self.punc:
                 phones.append(c)
+            if c and c in self.punc:
+                phones.append('sp')
             if v and v not in self.punc:
                 phones.append(v)
         # add sp between sentence (replace the last punc with sp)
......
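The frontend hunk stops silently dropping in-sentence punctuation and instead maps it to the short-pause phone `sp`. A standalone sketch of the new branch, assuming `punc` approximates the frontend's punctuation set and that `(c, v)` are the initial/final pair of one syllable (in the real `Frontend` these live on `self`):

```python
# Assumed punctuation set; the real Frontend keeps its own list in self.punc.
punc = "、：，；。？！“”‘’':,;.?!"

def append_phones(phones, c, v):
    # unchanged: keep a real initial
    if c and c not in punc:
        phones.append(c)
    # new in this commit: punctuation becomes a short pause phone
    if c and c in punc:
        phones.append('sp')
    # unchanged: keep a real final
    if v and v not in punc:
        phones.append(v)
    return phones

# "你好，世界": the comma now contributes an 'sp' instead of vanishing.
phones = []
for c, v in [("n", "i3"), ("h", "ao3"), ("，", ""), ("sh", "i4"), ("j", "ie4")]:
    append_phones(phones, c, v)
print(phones)  # ['n', 'i3', 'h', 'ao3', 'sp', 'sh', 'i4', 'j', 'ie4']
```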
@@ -64,6 +64,8 @@ class TextNormalizer():
         List[str]
             Sentences.
         """
+        # Only for pure Chinese here
+        text = text.replace(" ", "")
         text = self.SENTENCE_SPLITOR.sub(r'\1\n', text)
         text = text.strip()
         sentences = [sentence.strip() for sentence in re.split(r'\n+', text)]
......
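The TextNormalizer hunk removes spaces before sentence splitting, so a pure-Chinese input with stray whitespace normalizes the same way as one without. A self-contained sketch of that splitting path; the `SENTENCE_SPLITOR` pattern below is an assumed stand-in modeled on common Chinese sentence-ending punctuation, not the class's actual regex:

```python
import re

# Assumed stand-in for TextNormalizer.SENTENCE_SPLITOR.
SENTENCE_SPLITOR = re.compile(r'([。！？][”’]?)')

def split_sentences(text: str):
    # Only for pure Chinese here: spaces carry no meaning, so drop them
    # before splitting on sentence-ending punctuation.
    text = text.replace(" ", "")
    text = SENTENCE_SPLITOR.sub(r'\1\n', text)
    text = text.strip()
    return [sentence.strip() for sentence in re.split(r'\n+', text)]

print(split_sentences("今天 天气 不错。我们 出去 走走！"))
# ['今天天气不错。', '我们出去走走！']
```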