PaddlePaddle / DeepSpeech
Commit feb27e2a
Authored Sep 26, 2022 by Hui Zhang

fuse linear kv

Parent: 3adb20b4

Showing 1 changed file with 20 additions and 6 deletions (+20 −6):
paddlespeech/s2t/modules/attention.py
--- a/paddlespeech/s2t/modules/attention.py
+++ b/paddlespeech/s2t/modules/attention.py
@@ -20,6 +20,7 @@ from typing import Tuple
 import paddle
 from paddle import nn
 from paddle.nn import initializer as I
+from paddle.nn import functional as F
 
 from paddlespeech.s2t.modules.align import Linear
 from paddlespeech.s2t.utils.log import Log
@@ -45,6 +46,7 @@ class MultiHeadedAttention(nn.Layer):
         """
         super().__init__()
         assert n_feat % n_head == 0
+        self.n_feat = n_feat
         # We assume d_v always equals d_k
         self.d_k = n_feat // n_head
         self.h = n_head
@@ -54,6 +56,15 @@ class MultiHeadedAttention(nn.Layer):
         self.linear_out = Linear(n_feat, n_feat)
         self.dropout = nn.Dropout(p=dropout_rate)
 
+    def _build_once(self, *args, **kwargs):
+        super()._build_once(*args, **kwargs)
+        # if self.self_att:
+        #     self.linear_kv = Linear(self.n_feat, self.n_feat*2)
+        self.weight = paddle.concat(
+            [self.linear_k.weight, self.linear_v.weight], axis=-1)
+        self.bias = paddle.concat([self.linear_k.bias, self.linear_v.bias])
+        self._built = True
+
     def forward_qkv(self,
                     query: paddle.Tensor,
                     key: paddle.Tensor,
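The `_build_once` hook added above is where the fusion happens: the key and value projection matrices are concatenated along their output axis into one `(n_feat, 2 * n_feat)` weight (and the biases likewise), so a single matmul can produce both projections. Below is a minimal standalone sketch of the idea with toy sizes; the names `lin_k`, `lin_v`, `fused_w`, and `fused_b` are illustrative rather than taken from the repository.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

n_feat = 256

# Two separate projection layers, as in the original code path.
lin_k = nn.Linear(n_feat, n_feat)
lin_v = nn.Linear(n_feat, n_feat)

# Fuse them: weights concatenated along the output axis -> (n_feat, 2 * n_feat).
fused_w = paddle.concat([lin_k.weight, lin_v.weight], axis=-1)
fused_b = paddle.concat([lin_k.bias, lin_v.bias])

x = paddle.randn([8, 50, n_feat])        # (batch, time, n_feat)
k_ref, v_ref = lin_k(x), lin_v(x)        # two matmuls
kv = F.linear(x, fused_w, fused_b)       # one matmul, last dim is 2 * n_feat
k_fused, v_fused = paddle.split(kv, 2, axis=-1)

print(paddle.allclose(k_ref, k_fused))   # True
print(paddle.allclose(v_ref, v_fused))   # True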
@@ -73,9 +84,12 @@ class MultiHeadedAttention(nn.Layer):
                 (#batch, n_head, time2, d_k).
         """
         n_batch = query.shape[0]
         q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
-        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
-        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
+        # k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
+        # v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
+        k, v = F.linear(key, self.weight, self.bias).view(
+            n_batch, -1, 2 * self.h, self.d_k).split(
+                2, axis=2)
         q = q.transpose([0, 2, 1, 3])  # (batch, head, time1, d_k)
         k = k.transpose([0, 2, 1, 3])  # (batch, head, time2, d_k)
         v = v.transpose([0, 2, 1, 3])  # (batch, head, time2, d_k)
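In `forward_qkv` the fused output is reshaped to `2 * self.h` heads and then split in two along the head axis. Because the fused features are laid out as [key features | value features], this recovers exactly the same k and v as cutting the last feature dimension in half. A small sketch of that equivalence, with toy sizes assumed:

import paddle

n_batch, time2, h, d_k = 2, 5, 4, 8
kv = paddle.randn([n_batch, time2, 2 * h * d_k])   # fused (key | value) features

# Path 1: split the last dimension in half, then reshape each half into heads.
k1, v1 = paddle.split(kv, 2, axis=-1)
k1 = k1.reshape([n_batch, time2, h, d_k])
v1 = v1.reshape([n_batch, time2, h, d_k])

# Path 2: reshape to 2*h heads first, then split along the head axis (as in the diff).
k2, v2 = kv.reshape([n_batch, time2, 2 * h, d_k]).split(2, axis=2)

print(paddle.allclose(k1, k2), paddle.allclose(v1, v2))   # True True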
@@ -108,10 +122,10 @@ class MultiHeadedAttention(nn.Layer):
         # When will `if mask.size(2) > 0` be False?
         # 1. onnx(16/-1, -1/-1, 16/0)
         # 2. jit (16/-1, -1/-1, 16/0, 16/4)
-        if paddle.shape(mask)[2] > 0:  # time2 > 0
+        if mask.shape[2] > 0:  # time2 > 0
             mask = mask.unsqueeze(1).equal(0)  # (batch, 1, *, time2)
             # for last chunk, time2 might be larger than scores.size(-1)
-            mask = mask[:, :, :, :paddle.shape(scores)[-1]]
+            mask = mask[:, :, :, :scores.shape[-1]]
             scores = scores.masked_fill(mask, -float('inf'))
             attn = paddle.softmax(
                 scores, axis=-1).masked_fill(mask,
@@ -179,7 +193,7 @@ class MultiHeadedAttention(nn.Layer):
         # >>> torch.equal(b, c)        # True
         # >>> d = torch.split(a, 2, dim=-1)
         # >>> torch.equal(d[0], d[1])  # True
-        if paddle.shape(cache)[0] > 0:
+        if cache.shape[0] > 0:
             # last dim `d_k * 2` for (key, val)
             key_cache, value_cache = paddle.split(cache, 2, axis=-1)
             k = paddle.concat([key_cache, k], axis=2)
@@ -288,7 +302,7 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
         # >>> torch.equal(b, c)        # True
         # >>> d = torch.split(a, 2, dim=-1)
         # >>> torch.equal(d[0], d[1])  # True
-        if paddle.shape(cache)[0] > 0:
+        if cache.shape[0] > 0:
             # last dim `d_k * 2` for (key, val)
             key_cache, value_cache = paddle.split(cache, 2, axis=-1)
             k = paddle.concat([key_cache, k], axis=2)
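Besides the fusion, several hunks swap `paddle.shape(x)[i]` for `x.shape[i]`. In dynamic-graph mode `x.shape` is a plain Python list, so the comparison is evaluated in Python, while `paddle.shape(x)` returns an int tensor and the comparison becomes a tensor op. A quick sketch of the difference (dynamic-graph mode assumed):

import paddle

mask = paddle.zeros([2, 1, 16])

print(mask.shape[2] > 0)            # plain Python bool: True
print(paddle.shape(mask)[2] > 0)    # bool Tensor, evaluated at run time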