PaddlePaddle / DeepSpeech
Commit 925abcca
Authored on Oct 08, 2022 by Hui Zhang

    format

Parent: 2a75405e

Showing 3 changed files with 11 additions and 9 deletions (+11 -9):

  paddlespeech/s2t/modules/attention.py   +9 -7
  paddlespeech/s2t/modules/encoder.py     +1 -1
  paddlespeech/s2t/modules/mask.py        +1 -1
paddlespeech/s2t/modules/attention.py

@@ -19,8 +19,8 @@ from typing import Tuple
 import paddle
 from paddle import nn
-from paddle.nn import initializer as I
 from paddle.nn import functional as F
+from paddle.nn import initializer as I
 from paddlespeech.s2t.modules.align import Linear
 from paddlespeech.s2t.utils.log import Log
@@ -56,12 +56,12 @@ class MultiHeadedAttention(nn.Layer):
         self.linear_out = Linear(n_feat, n_feat)
         self.dropout = nn.Dropout(p=dropout_rate)

     def _build_once(self, *args, **kwargs):
         super()._build_once(*args, **kwargs)
         # if self.self_att:
         #     self.linear_kv = Linear(self.n_feat, self.n_feat*2)
-        self.weight = paddle.concat([self.linear_k.weight, self.linear_v.weight], axis=-1)
+        self.weight = paddle.concat(
+            [self.linear_k.weight, self.linear_v.weight], axis=-1)
         self.bias = paddle.concat([self.linear_k.bias, self.linear_v.bias])
         self._built = True
@@ -84,12 +84,14 @@ class MultiHeadedAttention(nn.Layer):
                 (#batch, n_head, time2, d_k).
         """
         n_batch = query.shape[0]
         q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
         # k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
         # v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
         k, v = F.linear(key, self.weight, self.bias).view(
             n_batch, -1, 2 * self.h, self.d_k).split(
                 2, axis=2)
         q = q.transpose([0, 2, 1, 3])  # (batch, head, time1, d_k)
         k = k.transpose([0, 2, 1, 3])  # (batch, head, time2, d_k)
         v = v.transpose([0, 2, 1, 3])  # (batch, head, time2, d_k)
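Note: the fused projection above relies on a simple identity: one F.linear call whose weight is the column-wise concatenation of the k and v weights (and whose bias is the concatenation of the two biases, as built in _build_once) produces exactly the two separate projections side by side, which the split then pulls apart. A minimal standalone sketch of that equivalence, with toy shapes of my own choosing (not code from this repo):

    import paddle
    import paddle.nn as nn
    import paddle.nn.functional as F

    n_feat, n_batch, time2 = 8, 2, 5
    linear_k = nn.Linear(n_feat, n_feat)
    linear_v = nn.Linear(n_feat, n_feat)
    key = paddle.randn([n_batch, time2, n_feat])

    # Separate projections, as in the commented-out original code.
    k_ref = linear_k(key)
    v_ref = linear_v(key)

    # Fused projection: weights concatenated along the output axis,
    # biases concatenated along axis 0, then one GEMM and a split.
    weight = paddle.concat([linear_k.weight, linear_v.weight], axis=-1)
    bias = paddle.concat([linear_k.bias, linear_v.bias])
    kv = F.linear(key, weight, bias)            # (n_batch, time2, 2 * n_feat)
    k_fused, v_fused = paddle.split(kv, 2, axis=-1)

    assert paddle.allclose(k_ref, k_fused)
    assert paddle.allclose(v_ref, v_fused)

The payoff is replacing two matmuls over the same input with one larger one, which is why _build_once caches self.weight and self.bias up front.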
@@ -203,7 +205,7 @@ class MultiHeadedAttention(nn.Layer):
         new_cache = paddle.concat((k, v), axis=-1)
         # scores = paddle.matmul(q,
         #                        k.transpose([0, 1, 3, 2])) / math.sqrt(self.d_k)
         scores = paddle.matmul(
             q, k, transpose_y=True) / math.sqrt(self.d_k)

         return self.forward_attention(v, scores, mask), new_cache
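Note: transpose_y=True folds the key transpose into the matmul itself, so the one-call form matches the commented-out two-step version while skipping the explicit transpose. A quick sanity check, assuming toy shapes:

    import math
    import paddle

    q = paddle.randn([2, 4, 6, 16])   # (batch, head, time1, d_k)
    k = paddle.randn([2, 4, 5, 16])   # (batch, head, time2, d_k)
    d_k = q.shape[-1]

    # Fused transpose inside the matmul vs. explicit transpose.
    a = paddle.matmul(q, k, transpose_y=True) / math.sqrt(d_k)
    b = paddle.matmul(q, k.transpose([0, 1, 3, 2])) / math.sqrt(d_k)
    assert paddle.allclose(a, b)      # both are (batch, head, time1, time2) scores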
paddlespeech/s2t/modules/encoder.py

@@ -221,7 +221,7 @@ class BaseEncoder(nn.Layer):
         xs, pos_emb, _ = self.embed(xs, tmp_masks, offset=offset)
         # after embed, xs=(B=1, chunk_size, hidden-dim)
         elayers, _, cache_t1, _ = att_cache.shape
         chunk_size = xs.shape[1]
         attention_key_size = cache_t1 + chunk_size
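Note: in this streaming forward, att_cache carries the attention keys/values of previously decoded chunks, so each new chunk attends over cache_t1 cached frames plus its own chunk_size frames. A toy illustration; the 4-D cache layout (elayers, head, cache_t1, d_k * 2) is an assumption inferred from the unpacking above and the new_cache concat in attention.py:

    import paddle

    # Assumed cache layout: (elayers, head, cache_t1, d_k * 2),
    # with k and v concatenated on the last axis.
    att_cache = paddle.zeros([12, 4, 16, 64 * 2])
    xs = paddle.randn([1, 8, 256])    # (B=1, chunk_size, hidden-dim)

    elayers, _, cache_t1, _ = att_cache.shape
    chunk_size = xs.shape[1]
    attention_key_size = cache_t1 + chunk_size
    print(attention_key_size)         # 24: each chunk sees 16 cached + 8 new frames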
paddlespeech/s2t/modules/mask.py

@@ -110,7 +110,7 @@ def subsequent_mask(size: int) -> paddle.Tensor:
     """
     ret = paddle.ones([size, size], dtype=paddle.bool)
     return paddle.tril(ret)


 def subsequent_chunk_mask(
         size: int,
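Note: subsequent_mask is the standard causal mask: paddle.tril keeps the lower triangle of a boolean ones matrix, so position i may attend only to positions <= i. For example:

    import paddle

    # 3x3 causal mask: row i is True at columns 0..i.
    ret = paddle.ones([3, 3], dtype=paddle.bool)
    print(paddle.tril(ret))
    # [[True , False, False],
    #  [True , True , False],
    #  [True , True , True ]]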