PaddlePaddle / DeepSpeech
Commit d638325c
Authored Sep 09, 2022 by Hui Zhang
do not jit save forward; using slice for zeros([0,0,0,0]) tensor
Parent: c1fbfe92

Showing 3 changed files with 23 additions and 37 deletions
paddlespeech/s2t/exps/u2/model.py    +20 -31
paddlespeech/s2t/models/u2/u2.py     +0  -4
paddlespeech/s2t/modules/encoder.py  +3  -2
paddlespeech/s2t/exps/u2/model.py

@@ -482,10 +482,12 @@ class U2Tester(U2Trainer):
         # TODO: 80(feature dim) be configable
         input_spec = [
             paddle.static.InputSpec(shape=[1, None, 80], dtype='float32'),
             paddle.static.InputSpec(shape=[1], dtype='int32'), -1,
-            paddle.static.InputSpec(
-                shape=[None, None, None, None], dtype='float32'),
-            paddle.static.InputSpec(
-                shape=[None, None, None, None], dtype='float32')
+            paddle.static.InputSpec(
+                shape=[None, None, None, None],
+                dtype='float32'),
+            paddle.static.InputSpec(
+                shape=[None, None, None, None],
+                dtype='float32')
         ]
         infer_model.forward_encoder_chunk = paddle.jit.to_static(
@@ -511,7 +513,7 @@ class U2Tester(U2Trainer):
         infer_model.ctc_activation = paddle.jit.to_static(
             infer_model.ctc_activation, input_spec=input_spec)
-        paddle.jit.save(infer_model, './export.jit', combine_params=True)
+        paddle.jit.save(infer_model, './export.jit', combine_params=True, skip_forward=True)

         def flatten(out):
             if isinstance(out, paddle.Tensor):
@@ -531,33 +533,20 @@ class U2Tester(U2Trainer):
         att_cache = paddle.zeros([0, 0, 0, 0])
         cnn_cache = paddle.zeros([0, 0, 0, 0])
-        # xs, att_cache, cnn_cache = infer_model.forward_encoder_chunk(xs1, offset, required_cache_size, att_cache, cnn_cache)
-        # xs2 = paddle.rand(shape=[1, 67, 80], dtype='float32')
-        # offset = paddle.to_tensor([16], dtype='int32')
-        # out1 = infer_model.forward_encoder_chunk(xs2, offset, required_cache_size, att_cache, cnn_cache)
-        # print(out1)
-        xs, att_cache, cnn_cache = infer_model.forward_encoder_chunk(xs1, offset, att_cache, cnn_cache)
+        xs, att_cache, cnn_cache = infer_model.forward_encoder_chunk(xs1, offset, required_cache_size, att_cache, cnn_cache)
         xs2 = paddle.rand(shape=[1, 67, 80], dtype='float32')
         offset = paddle.to_tensor([16], dtype='int32')
-        out1 = infer_model.forward_encoder_chunk(xs2, offset, att_cache,
-                                                 cnn_cache)
-        print(out1)
+        out1 = infer_model.forward_encoder_chunk(xs2, offset,
+                                                 required_cache_size,
+                                                 att_cache, cnn_cache)
+        print('py encoder', out1)

-        # from paddle.jit.layer import Layer
-        # layer = Layer()
-        # layer.load('./export.jit', paddle.CPUPlace())
-        xs1 = paddle.full([1, 7, 80], 0.1, dtype='float32')
-        # offset = paddle.to_tensor([0], dtype='int32')
-        # att_cache = paddle.zeros([0, 0, 0, 0])
-        # cnn_cache=paddle.zeros([0, 0, 0, 0])
-        # xs, att_cache, cnn_cache = layer.forward_encoder_chunk(xs1, offset, att_cache, cnn_cache)
-        # offset = paddle.to_tensor([16], dtype='int32')
-        # out2 = layer.forward_encoder_chunk(xs2, offset, att_cache, cnn_cache)
-        # # print(out2)
-        # out1 = flatten(out1)
-        # out2 = flatten(out2)
-        # for i in range(len(out1)):
-        #     print(np.equal(out1[i].numpy(), out2[i].numpy()).all())
+        from paddle.jit.layer import Layer
+        layer = Layer()
+        layer.load('./export.jit', paddle.CPUPlace())
+
+        offset = paddle.to_tensor([0], dtype='int32')
+        att_cache = paddle.zeros([0, 0, 0, 0])
+        cnn_cache = paddle.zeros([0, 0, 0, 0])
+        func = getattr(layer, 'forward_encoder_chunk')
+        xs, att_cache, cnn_cache = func(xs1, offset, att_cache, cnn_cache)
+        print('py static encoder', xs)
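Taken together, the model.py changes settle on converting only the chunk-level methods with paddle.jit.to_static, saving them with skip_forward=True so the plain forward is left out of the bundle, and then validating the result by loading it back through paddle.jit.layer.Layer. The sketch below condenses the diff's own debug code into two illustrative helpers; the helper names, the standalone structure, and the assumption that `model` exposes forward_encoder_chunk(xs, offset, required_cache_size, att_cache, cnn_cache) over 80-dim features are mine, not part of the commit, and the combine_params/skip_forward flags are used exactly as the diff uses them (they may require a Paddle build that supports them).

import paddle
from paddle.jit.layer import Layer


def export_encoder_chunk(model, export_path='./export.jit'):
    # Hypothetical helper: convert only the chunk-level encoder method and
    # save it; skip_forward=True keeps `forward` out of the saved program,
    # which is what "do not jit save forward" refers to.
    input_spec = [
        paddle.static.InputSpec(shape=[1, None, 80], dtype='float32'),  # xs
        paddle.static.InputSpec(shape=[1], dtype='int32'),  # offset
        -1,  # required_cache_size, fixed at export time
        paddle.static.InputSpec(shape=[None, None, None, None], dtype='float32'),  # att_cache
        paddle.static.InputSpec(shape=[None, None, None, None], dtype='float32'),  # cnn_cache
    ]
    model.forward_encoder_chunk = paddle.jit.to_static(
        model.forward_encoder_chunk, input_spec=input_spec)
    paddle.jit.save(model, export_path, combine_params=True, skip_forward=True)


def load_encoder_chunk(export_path='./export.jit'):
    # Hypothetical helper: reload the saved program and return the
    # chunk-encoder entry point, mirroring the debug code in this diff.
    layer = Layer()
    layer.load(export_path, paddle.CPUPlace())
    return getattr(layer, 'forward_encoder_chunk')

Note that the diff's own test code calls the loaded entry point with just the four tensor inputs (xs, offset, att_cache, cnn_cache), so the -1 required_cache_size appears to be fixed at conversion time rather than passed at run time.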
paddlespeech/s2t/models/u2/u2.py

@@ -924,10 +924,6 @@ class U2InferModel(U2Model):
     def __init__(self, configs: dict):
         super().__init__(configs)

-    @jit.to_static(input_spec=[
-        paddle.static.InputSpec(shape=[1, 1, 1], dtype='int64'),
-        paddle.static.InputSpec(shape=[1], dtype='int64')
-    ])
     def forward(self,
                 feats,
                 feats_lengths,
paddlespeech/s2t/modules/encoder.py

@@ -251,10 +251,11 @@ class BaseEncoder(nn.Layer):
         for i, layer in enumerate(self.encoders):
             # att_cache[i:i+1] = (1, head, cache_t1, d_k*2)
             # cnn_cache[i:i+1] = (1, B=1, hidden-dim, cache_t2)
+            # zeros([0,0,0,0]) support [i:i+1] slice
             xs, _, new_att_cache, new_cnn_cache = layer(
                 xs, att_mask, pos_emb,
-                att_cache=att_cache[i:i+1] if elayers > 0 else att_cache,
-                cnn_cache=cnn_cache[i:i+1] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache,
+                att_cache=att_cache[i:i+1],
+                cnn_cache=cnn_cache[i:i+1],
             )
             # new_att_cache = (1, head, attention_key_size, d_k*2)
             # new_cnn_cache = (B=1, hidden-dim, cache_t2)
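The encoder.py change relies on the fact that a zero-size placeholder cache still supports per-layer slicing, which is why the old `if ... else` guards around att_cache/cnn_cache can be dropped. A minimal sketch of that property (illustrative, not from the commit):

import paddle

# Empty cache placeholder, as passed in for the first decoding chunk.
att_cache = paddle.zeros([0, 0, 0, 0])

# Slicing dim 0 of a zero-size tensor just yields another zero-size tensor,
# so `att_cache[i:i + 1]` is safe whether or not a real cache is present.
first_layer_cache = att_cache[0:1]
print(first_layer_cache.shape)  # [0, 0, 0, 0]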