Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
DeepSpeech
提交
a9a3cb48
D
DeepSpeech
项目概览
PaddlePaddle
/
DeepSpeech
大约 1 年 前同步成功
通知
207
Star
8425
Fork
1598
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
245
列表
看板
标记
里程碑
合并请求
3
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
DeepSpeech
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
245
Issue
245
列表
看板
标记
里程碑
合并请求
3
合并请求
3
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
a9a3cb48
编写于
7月 05, 2022
作者:
H
Hui Zhang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
remove fluid tools for onnx export,test=doc
上级
9c4763ec
变更
4
显示空白变更内容
内联
并排
Showing
4 changed file
with
3 addition
and
326 deletion
+3
-326
speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
+0
-111
speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
+0
-158
speechx/examples/ds2_ol/onnx/local/prune.sh
speechx/examples/ds2_ol/onnx/local/prune.sh
+0
-23
speechx/examples/ds2_ol/onnx/run.sh
speechx/examples/ds2_ol/onnx/run.sh
+3
-34
未找到文件。
speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
已删除
100755 → 0
浏览文件 @
9c4763ec
#!/usr/bin/env python3 -W ignore::DeprecationWarning
# https://github.com/jiangjiajun/PaddleUtils/blob/main/paddle/README.md#2-%E4%BF%AE%E6%94%B9paddle%E6%A8%A1%E5%9E%8B%E8%BE%93%E5%85%A5shape
import
argparse
# paddle inference shape
def process_old_ops_desc(program):
    """Patch legacy ``matmul`` op descs so newer runtimes accept them.

    Older Paddle models may ship ``matmul`` ops without the ``head_number``
    attribute; default it to 1, in place, for every such op in block 0.

    Args:
        program: a Paddle ``Program`` whose op descs are modified in place.
    """
    for op in program.blocks[0].ops:
        if op.type == "matmul" and not op.has_attr("head_number"):
            op._set_attr("head_number", 1)
def infer_shape(program, input_shape_dict):
    """Override input var shapes, then re-run shape inference on every op.

    Args:
        program: Paddle ``Program`` loaded from an inference model; block
            descs are modified in place.
        input_shape_dict: mapping of input var name -> new shape list,
            e.g. ``{'audio_chunk': [1, -1, 161]}``.
    """
    # Version is packed as an int, e.g. 2002002 == v2.2.2.
    model_version = program.desc._version()
    # e.g. '2.2.2'
    paddle_version = paddle.__version__
    major_ver = model_version // 1000000
    minor_ver = (model_version - major_ver * 1000000) // 1000
    patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
    model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
    if model_version != paddle_version:
        print(
            f"[WARNING] The model is saved by paddlepaddle v{model_version}, but now your paddlepaddle is version of {paddle_version}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model"
        )

    # Ops without a compute kernel cannot infer shape; skip them.
    OP_WITHOUT_KERNEL_SET = {
        'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
        'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
        'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
        'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
        'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
        'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
        'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
        'copy_cross_scope'
    }

    # Overwrite the declared shapes of the named input vars (block 0).
    for k, v in input_shape_dict.items():
        program.blocks[0].var(k).desc.set_shape(v)

    for i in range(len(program.blocks)):
        # BUG FIX: the op count must come from blocks[i], not blocks[0];
        # the old code mis-iterated (or crashed on) multi-block programs.
        for j in range(len(program.blocks[i].ops)):
            op_type = program.blocks[i].ops[j].type
            if op_type in OP_WITHOUT_KERNEL_SET:
                print(f"not infer: {op_type} op")
                continue
            print(f"infer: {op_type} op")
            program.blocks[i].ops[j].desc.infer_shape(program.blocks[i].desc)
def parse_arguments(argv=None):
    """Parse CLI options for shape inference.

    Example:
        python pd_infer_shape.py --model_dir data/exp/deepspeech2_online/checkpoints \
            --model_filename avg_1.jit.pdmodel \
            --params_filename avg_1.jit.pdiparams \
            --save_dir . \
            --input_shape_dict="{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}"

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Added (backward-compatibly) so the parser can be exercised
            programmatically; CLI behavior is unchanged.

    Returns:
        argparse.Namespace with model_dir, model_filename, params_filename,
        save_dir and input_shape_dict attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_dir',
        required=True,
        help='Path of directory saved the input model.')
    parser.add_argument(
        '--model_filename', required=True, help='model.pdmodel.')
    parser.add_argument(
        '--params_filename', required=True, help='model.pdiparams.')
    parser.add_argument(
        '--save_dir',
        required=True,
        help='directory to save the exported model.')
    parser.add_argument(
        '--input_shape_dict', required=True, help="The new shape information.")
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_arguments()

    import ast

    import paddle
    # Shape inference works on the static graph representation.
    paddle.enable_static()
    import paddle.fluid as fluid

    # SECURITY FIX: the shape dict comes straight from the command line;
    # ast.literal_eval accepts the same dict/list literals as eval() did
    # but cannot execute arbitrary code.
    input_shape_dict_str = args.input_shape_dict
    input_shape_dict = ast.literal_eval(input_shape_dict_str)

    print("Start to load paddle model...")
    exe = fluid.Executor(fluid.CPUPlace())
    prog, ipts, outs = fluid.io.load_inference_model(
        args.model_dir,
        exe,
        model_filename=args.model_filename,
        params_filename=args.params_filename)

    # Patch legacy matmul descs, then re-infer shapes with the new inputs.
    process_old_ops_desc(prog)
    infer_shape(prog, input_shape_dict)

    fluid.io.save_inference_model(
        args.save_dir,
        ipts,
        outs,
        exe,
        prog,
        model_filename=args.model_filename,
        params_filename=args.params_filename)
speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
已删除
100755 → 0
浏览文件 @
9c4763ec
#!/usr/bin/env python3 -W ignore::DeprecationWarning
# https://github.com/jiangjiajun/PaddleUtils/blob/main/paddle/README.md#1-%E8%A3%81%E5%89%AApaddle%E6%A8%A1%E5%9E%8B
import
argparse
import
sys
from
typing
import
List
# paddle prune model.
def prepend_feed_ops(program, feed_target_names: List[str],
                     feed_holder_name='feed'):
    """Prepend one ``feed`` op per input name to the program's global block.

    Inputs that no longer exist in the pruned program are reported and
    skipped rather than treated as errors.

    Args:
        program: inference program to modify in place.
        feed_target_names (List[str]): input variable names, in feed order.
        feed_holder_name (str, optional): name of the shared feed holder
            variable. Defaults to 'feed'.
    """
    import paddle.fluid.core as core

    if not feed_target_names:
        return

    block = program.global_block()
    holder = block.create_var(
        name=feed_holder_name,
        type=core.VarDesc.VarType.FEED_MINIBATCH,
        persistable=True, )

    for col, target in enumerate(feed_target_names):
        if not block.has_var(target):
            print(
                f"The input[{col}]: '{target}' doesn't exist in pruned inference program, which will be ignored in new saved model."
            )
            continue
        dest = block.var(target)
        block._prepend_op(
            type='feed',
            inputs={'X': [holder]},
            outputs={'Out': [dest]},
            attrs={'col': col}, )
def append_fetch_ops(program, fetch_target_names: List[str],
                     fetch_holder_name='fetch'):
    """in the place, we will add the fetch op

    Args:
        program (_type_): inference program
        fetch_target_names (List[str]): target names
        fetch_holder_name (str, optional): fetch op name. Defaults to 'fetch'.
    """
    import paddle.fluid.core as core

    block = program.global_block()
    holder = block.create_var(
        name=fetch_holder_name,
        type=core.VarDesc.VarType.FETCH_LIST,
        persistable=True, )

    print(f"the len of fetch_target_names: {len(fetch_target_names)}")

    # One fetch op per output; 'col' records its slot in the fetch list.
    for col, target in enumerate(fetch_target_names):
        block.append_op(
            type='fetch',
            inputs={'X': [target]},
            outputs={'Out': [holder]},
            attrs={'col': col}, )
def insert_fetch(program, fetch_target_names: List[str],
                 fetch_holder_name='fetch'):
    """in the place, we will add the fetch op

    Args:
        program (_type_): inference program
        fetch_target_names (List[str]): target names
        fetch_holder_name (str, optional): fetch op name. Defaults to 'fetch'.
    """
    block = program.global_block()

    # Collect the indices of existing fetch ops, then remove them from the
    # back so earlier indices remain valid while deleting.
    stale_indices = [
        idx for idx, op in enumerate(block.ops) if op.type == 'fetch'
    ]
    for idx in reversed(stale_indices):
        block._remove_op(idx)
    program.desc.flush()

    # Attach fresh fetch ops for the requested targets.
    append_fetch_ops(program, fetch_target_names, fetch_holder_name)
def parse_arguments(argv=None):
    """Parse CLI options for model pruning.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Added (backward-compatibly) so the parser can be exercised
            programmatically; CLI behavior is unchanged.

    Returns:
        argparse.Namespace with model_dir, model_filename, params_filename,
        output_names (comma-separated string), save_dir and debug (bool).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_dir',
        required=True,
        help='Path of directory saved the input model.')
    parser.add_argument(
        '--model_filename', required=True, help='model.pdmodel.')
    parser.add_argument(
        '--params_filename', required=True, help='model.pdiparams.')
    parser.add_argument(
        '--output_names',
        required=True,
        help='The outputs of model. sep by comma')
    parser.add_argument(
        '--save_dir',
        required=True,
        help='directory to save the exported model.')
    # BUG FIX: previously `default=False` with no converter meant any CLI
    # value (even "False") arrived as a non-empty, truthy string, so
    # `--debug False` still enabled debug output.
    parser.add_argument(
        '--debug',
        type=lambda v: str(v).strip().lower() in ('1', 'true', 'yes', 'y', 'on'),
        default=False,
        help='output debug info.')
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_arguments()

    # '--output_names' arrives as one comma-separated string; split it into
    # a list and reject duplicates (duplicate fetch names are ambiguous).
    args.output_names = args.output_names.split(",")
    if len(set(args.output_names)) < len(args.output_names):
        print(
            f"[ERROR] There's dumplicate name in --output_names {args.output_names}, which is not allowed."
        )
        sys.exit(-1)

    import paddle
    # Pruning works on the static graph representation.
    paddle.enable_static()
    # hack prepend_feed_ops
    # NOTE: monkey-patch fluid's internal prepend_feed_ops with the lenient
    # version above (skips inputs missing from the pruned program instead of
    # failing) BEFORE importing paddle.fluid, so save_inference_model below
    # picks up the patched function.
    paddle.fluid.io.prepend_feed_ops = prepend_feed_ops
    import paddle.fluid as fluid

    print("start to load paddle model")
    exe = fluid.Executor(fluid.CPUPlace())
    prog, ipts, outs = fluid.io.load_inference_model(
        args.model_dir,
        exe,
        model_filename=args.model_filename,
        params_filename=args.params_filename)

    print("start to load insert fetch op")
    # Replace the program's fetch ops so only the requested outputs remain,
    # then resolve each output name to its variable in the global block.
    new_outputs = []
    insert_fetch(prog, args.output_names)
    for out_name in args.output_names:
        new_outputs.append(prog.global_block().var(out_name))

    # not equal to paddle.static.save_inference_model
    fluid.io.save_inference_model(
        args.save_dir,
        ipts,
        new_outputs,
        exe,
        prog,
        model_filename=args.model_filename,
        params_filename=args.params_filename)

    # With --debug, dump every remaining op of the pruned program.
    if args.debug:
        for op in prog.global_block().ops:
            print(op)
speechx/examples/ds2_ol/onnx/local/prune.sh
已删除
100755 → 0
浏览文件 @
9c4763ec
#!/bin/bash
set -e

# Prune a Paddle inference model down to the given output names.
# e.g.: local/prune.sh data/exp/deepspeech2_online/checkpoints avg_1.jit.pdmodel avg_1.jit.pdiparams softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 $PWD
if [ $# != 5 ]; then
    echo "usage: $0 model_dir model_filename param_filename outputs_names save_dir"
    exit 1
fi

dir=$1
model=$2
param=$3
outputs=$4
save_dir=$5

# BUG FIX: expansions are now quoted so paths containing spaces are passed
# as single arguments instead of being word-split.
python local/pd_prune_model.py \
    --model_dir "$dir" \
    --model_filename "$model" \
    --params_filename "$param" \
    --output_names "$outputs" \
    --save_dir "$save_dir"
speechx/examples/ds2_ol/onnx/run.sh
浏览文件 @
a9a3cb48
...
...
@@ -39,41 +39,10 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ];then
popd
fi
output_names
=
softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# prune model by outputs
mkdir
-p
$exp
/prune
# prune model deps on output_names.
./local/prune.sh
$dir
$model
$param
$output_names
$exp
/prune
fi
# aishell rnn hidden is 1024
# wenetspeech rnn hiddn is 2048
if
[
$model_type
==
'aishell'
]
;
then
input_shape_dict
=
"{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}"
elif
[
$model_type
==
'wenetspeech'
]
;
then
input_shape_dict
=
"{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 2048], 'chunk_state_h_box':[5,1,2048]}"
else
echo
"not support:
$model_type
"
exit
-1
fi
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
# infer shape by new shape
mkdir
-p
$exp
/shape
echo
$input_shape_dict
python3
local
/pd_infer_shape.py
\
--model_dir
$dir
\
--model_filename
$model
\
--params_filename
$param
\
--save_dir
$exp
/shape
\
--input_shape_dict
=
"
${
input_shape_dict
}
"
fi
input_file
=
$exp
/static_ds2online_inputs.pickle
test
-e
$input_file
if
[
${
stage
}
-le
3
]
&&
[
${
stop_stage
}
-ge
3
]
;
then
if
[
${
stage
}
-le
1
]
&&
[
${
stop_stage
}
-ge
1
]
;
then
# to onnx
./local/tonnx.sh
$dir
$model
$param
$exp
/model.onnx
...
...
@@ -81,7 +50,7 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then
fi
if
[
${
stage
}
-le
4
]
&&
[
${
stop_stage
}
-ge
4
]
;
then
if
[
${
stage
}
-le
2
]
&&
[
${
stop_stage
}
-ge
2
]
;
then
# ort graph optmize
./local/ort_opt.py
--model_in
$exp
/model.onnx
--opt_level
0
--model_out
$exp
/model.ort.opt.onnx
...
...
@@ -89,7 +58,7 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ] ;then
fi
if
[
${
stage
}
-le
5
]
&&
[
${
stop_stage
}
-ge
5
]
;
then
if
[
${
stage
}
-le
3
]
&&
[
${
stop_stage
}
-ge
3
]
;
then
# convert opset_num to 11
./local/onnx_convert_opset.py
--target-opset
11
--model-file
$exp
/model.ort.opt.onnx
--save-model
$exp
/model.optset11.onnx
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录