Commit 92874cc0 · PaddlePaddle / PaddleSlim (unverified)
Authored Jan 31, 2023 by zhouzj; committed via GitHub on Jan 31, 2023.
Clear fluid api and fix tests (#1641)
* remove fluid apis.
* fix hpo.
* fix asp.
Parent: b248f202
Showing 8 changed files with 91 additions and 83 deletions (+91 -83).
demo/models/pvanet.py  +12 -6
demo/quant/pact_quant_aware/train.py  +3 -3
demo/quant/quant_embedding/net.py  +28 -30
demo/quant/quant_embedding/train.py  +3 -2
paddleslim/auto_compression/create_compressed_program.py  +10 -14
paddleslim/quant/post_quant_hpo.py  +1 -1
paddleslim/quant/reconstruction_quantization.py  +29 -20
tests/test_latency_predictor.py  +5 -7
demo/models/pvanet.py (view file @ 92874cc0)

```diff
@@ -2,10 +2,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 import paddle
-import paddle.fluid as fluid
+from paddle.nn.initializer import KaimingUniform
 import os, sys, time, math
 import numpy as np
 from collections import namedtuple

 BLOCK_TYPE_MCRELU = 'BLOCK_TYPE_MCRELU'
@@ -458,15 +455,24 @@ def loss(f_score, f_geo, l_score, l_geo, l_mask, class_num=1):
     abs_geo_diff = paddle.abs(geo_diff)
     l_flag = l_score >= 1
     l_flag = paddle.cast(x=l_flag, dtype="float32")
-    l_flag = fluid.layers.expand(x=l_flag, expand_times=[1, channels, 1, 1])
+    l_flag = paddle.expand(x=l_flag, shape=[l_flag.shape[0], l_flag.shape[1] * channels, l_flag.shape[2], l_flag.shape[3]])
     smooth_l1_sign = abs_geo_diff < l_flag
     smooth_l1_sign = paddle.cast(x=smooth_l1_sign, dtype="float32")
     in_loss = abs_geo_diff * abs_geo_diff * smooth_l1_sign + (abs_geo_diff - 0.5) * (1.0 - smooth_l1_sign)
-    l_short_edge = fluid.layers.expand(x=l_short_edge, expand_times=[1, channels, 1, 1])
+    l_short_edge = paddle.expand(x=l_short_edge, shape=[l_short_edge.shape[0], l_short_edge.shape[1] * channels, l_short_edge.shape[2], l_short_edge.shape[3]])
     out_loss = l_short_edge * in_loss * l_flag
     out_loss = out_loss * l_flag
     smooth_l1_loss = paddle.mean(out_loss)
```
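The expand rewrite above changes semantics, not just the namespace: fluid.layers.expand tiled a tensor expand_times times per axis, while paddle.expand broadcasts to a target shape, so the new call folds the multiplier into the shape argument (l_flag.shape[1] * channels). A minimal sketch of the equivalence, assuming the expanded axis has size 1 (as a one-channel score map would); shapes are illustrative, not from the commit:

```python
import paddle

x = paddle.ones([2, 1, 3])             # axis 1 has size 1
# old API: fluid.layers.expand(x, expand_times=[1, 4, 1])  -> tile axis 1 four times
# new API: broadcast to the explicit target shape
y = paddle.expand(x, shape=[2, 4, 3])
print(y.shape)                         # [2, 4, 3]
```

Note that paddle.expand only broadcasts size-1 axes, so the rewrite relies on l_flag having a single channel before expansion.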
demo/quant/pact_quant_aware/train.py (view file @ 92874cc0)

```diff
@@ -18,7 +18,7 @@ from paddleslim.analysis import flops
 from paddleslim.quant import quant_aware, quant_post, convert
 import models
 from utility import add_arguments, print_arguments
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.common_ops_import import LayerHelper

 quantization_model_save_dir = './quantization_models/'
 _logger = get_logger(__name__, level=logging.INFO)
@@ -146,8 +146,8 @@ def compress(args):
         raise ValueError("{} is not supported.".format(args.data))
     image_shape = [int(m) for m in image_shape.split(",")]
-    assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
+    assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
     image = paddle.static.data(name='image', shape=[None] + image_shape, dtype='float32')
     if args.use_pact:
```
demo/quant/quant_embedding/net.py (view file @ 92874cc0)

```diff
@@ -15,15 +15,17 @@
 neural network for word2vec
 """
 from __future__ import print_function
 import math
 import numpy as np
 import paddle
-import paddle.fluid as fluid
+import paddle.nn.functional as F


-def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
-    datas = []
+def skip_gram_word2vec(dict_size, embedding_size, batch_size, is_sparse=False, neg_num=5):
+    words = []
     input_word = paddle.static.data(name="input_word", shape=[None, 1], dtype='int64')
     true_word = paddle.static.data(
@@ -31,14 +33,13 @@ def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
     neg_word = paddle.static.data(name="neg_label", shape=[None, neg_num], dtype='int64')

-    datas.append(input_word)
-    datas.append(true_word)
-    datas.append(neg_word)
+    words.append(input_word)
+    words.append(true_word)
+    words.append(neg_word)

-    py_reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=datas, name='py_reader', use_double_buffer=True)
-    words = fluid.layers.read_file(py_reader)
+    py_reader = paddle.io.DataLoader.from_generator(capacity=64, feed_list=words, use_double_buffer=True, iterable=False)
+    words[0] = paddle.reshape(words[0], [-1])
+    words[1] = paddle.reshape(words[1], [-1])

     init_width = 0.5 / embedding_size
@@ -72,8 +73,7 @@ def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
         input=neg_word_reshape,
         is_sparse=is_sparse,
         size=[dict_size, embedding_size],
-        param_attr=paddle.ParamAttr(name='emb_w', learning_rate=1.0))
+        param_attr=paddle.ParamAttr(name='emb_w', learning_rate=1.0))

     neg_emb_w_re = paddle.reshape(neg_emb_w, shape=[-1, neg_num, embedding_size])
@@ -81,12 +81,11 @@ def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
         input=neg_word_reshape,
         is_sparse=is_sparse,
         size=[dict_size, 1],
-        param_attr=paddle.ParamAttr(name='emb_b', learning_rate=1.0))
+        param_attr=paddle.ParamAttr(name='emb_b', learning_rate=1.0))

     neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])
-    true_logits = paddle.add(paddle.mean(paddle.multiply(input_emb, true_emb_w), keepdim=True), true_emb_b)
+    true_logits = paddle.add(paddle.mean(paddle.multiply(input_emb, true_emb_w), keepdim=True), true_emb_b)

     input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, embedding_size])
     neg_matmul = paddle.matmul(input_emb_re, neg_emb_w_re, transpose_y=True)
@@ -94,18 +93,17 @@ def skip_gram_word2vec(dict_size, embedding_size, is_sparse=False, neg_num=5):
     neg_logits = paddle.add(neg_matmul_re, neg_emb_b_vec)

     #nce loss
-    label_ones = fluid.layers.fill_constant_batch_size_like(true_logits, shape=[-1, 1], value=1.0, dtype='float32')
-    label_zeros = fluid.layers.fill_constant_batch_size_like(true_logits, shape=[-1, neg_num], value=0.0, dtype='float32')
-    true_xent = paddle.nn.functional.binary_cross_entropy(true_logits, label_ones)
-    neg_xent = paddle.nn.functional.binary_cross_entropy(neg_logits, label_zeros)
-    cost = paddle.add(paddle.sum(true_xent, axis=1), paddle.sum(neg_xent, axis=1))
+    # TODO: replaced by paddle.tensor.creation.fill_constant_batch_size_like
+    label_ones = paddle.full(shape=[batch_size, 1], fill_value=1.0, dtype='float32')
+    label_zeros = paddle.full(shape=[batch_size, neg_num], fill_value=0.0, dtype='float32')
+    true_xent = F.binary_cross_entropy_with_logits(true_logits, label_ones, reduction='none')
+    neg_xent = F.binary_cross_entropy_with_logits(neg_logits, label_zeros, reduction='none')
+    cost = paddle.add(paddle.sum(true_xent, axis=1), paddle.sum(neg_xent, axis=1))
     avg_cost = paddle.mean(cost)
     return avg_cost, py_reader
```
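Beyond dropping fluid, the loss hunk above switches from binary_cross_entropy, which expects probabilities, to binary_cross_entropy_with_logits, which applies the sigmoid internally and is numerically more stable; it also appears to fix the old code's use of raw logits as probabilities. A self-contained check of the equivalence, with illustrative values:

```python
import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor([[0.3], [-1.2]])
labels = paddle.ones([2, 1])

# the *_with_logits form folds the sigmoid into the loss
loss_a = F.binary_cross_entropy(F.sigmoid(logits), labels, reduction='none')
loss_b = F.binary_cross_entropy_with_logits(logits, labels, reduction='none')
print(paddle.allclose(loss_a, loss_b))  # True
```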
demo/quant/quant_embedding/train.py (view file @ 92874cc0)

```diff
@@ -121,7 +121,7 @@ def convert_python_to_tensor(weight, batch_size, sample_reader):
 def train_loop(args, train_program, reader, py_reader, loss, trainer_id, weight, lr):
-    py_reader.decorate_tensor_provider(
+    py_reader.set_batch_generator(
         convert_python_to_tensor(weight, args.batch_size, reader.train()))

     place = paddle.CPUPlace()
@@ -213,6 +213,7 @@ def train(args):
     loss, py_reader = skip_gram_word2vec(
         word2vec_reader.dict_size,
         args.embedding_size,
+        args.batch_size,
         is_sparse=args.is_sparse,
         neg_num=args.nce_num)
```
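decorate_tensor_provider was the fluid-era py_reader feeding API; its DataLoader counterpart is set_batch_generator, used here with the from_generator loader built in net.py. A minimal static-graph sketch of the pattern; the feed variable, shapes, and generator below are illustrative:

```python
import numpy as np
import paddle

paddle.enable_static()

x = paddle.static.data(name='x', shape=[None, 1], dtype='int64')
loader = paddle.io.DataLoader.from_generator(
    feed_list=[x], capacity=64, use_double_buffer=True, iterable=False)

def batch_generator():
    for _ in range(4):
        yield [np.random.randint(0, 10, size=(8, 1)).astype('int64')]

# old: py_reader.decorate_tensor_provider(...)
loader.set_batch_generator(batch_generator)
# with iterable=False, the training loop brackets each epoch with
# loader.start() / loader.reset() around executor.run(...) calls
```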
paddleslim/auto_compression/create_compressed_program.py (view file @ 92874cc0)

```diff
@@ -78,10 +78,8 @@ def _create_optimizer(train_config):
     ### build optimizer
     optim_params = optimizer_builder['optimizer']
     optim_type = optim_params.pop('type')
-    opt = getattr(optimizer, optim_type)(learning_rate=lr, grad_clip=grad_clip, weight_decay=reg, **optim_params)
+    opt = getattr(optimizer, optim_type)(learning_rate=lr, grad_clip=grad_clip, weight_decay=reg, **optim_params)
     return opt, lr
@@ -160,8 +158,8 @@ def _parse_distill_loss(distill_node_pair,
     for node, loss_clas, lam in zip(distill_node_pair, distill_loss, distill_lambda):
         tmp_loss = losses.get(loss_clas, 0.0)
-        _logger.info("train config.distill_node_pair: {}".format(node, loss_clas, lam))
+        _logger.info("train config.distill_node_pair: {}".format(node, loss_clas, lam))
         assert len(node) % 2 == 0, \
             "distill_node_pair config wrong, the length needs to be an even number"
         for i in range(len(node) // 2):
@@ -529,9 +527,7 @@ def build_prune_program(executor,
         original_shapes = {}
         for param in train_program_info.program.global_block().all_parameters():
-            if config['prune_params_name'] is not None and param.name in config['prune_params_name']:
+            if config['prune_params_name'] is not None and param.name in config['prune_params_name']:
                 params.append(param.name)
                 original_shapes[param.name] = param.shape
@@ -541,9 +537,8 @@ def build_prune_program(executor,
             train_program_info.program,
             paddle.static.global_scope(),
             params=params,
-            ratios=[config['pruned_ratio']] * len(params) if isinstance(config['pruned_ratio'], float) else config['pruned_ratio'],
+            ratios=[config['pruned_ratio']] * len(params) if isinstance(config['pruned_ratio'], float) else config['pruned_ratio'],
             place=place)
         _logger.info("####################channel pruning##########################")
@@ -577,8 +572,9 @@ def build_prune_program(executor,
                 pruner.add_supported_layer(param.name)
             if "teacher_" in param.name:
                 excluded_params_name.append(param.name)
-        pruner.set_excluded_layers(train_program_info.program, excluded_params_name)
+        pruner.set_excluded_layers(main_program=train_program_info.program, param_names=excluded_params_name)
     elif strategy.startswith('transformer_prune'):
         from .transformer_pruner import TransformerPruner
         assert eval_dataloader is not None, "transformer_pruner must set eval_dataloader"
```
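The first hunk re-wraps the dynamic optimizer construction in _create_optimizer: the optimizer class is looked up on paddle.optimizer by its 'type' string and built from the remaining config keys. A minimal sketch of that pattern, with an illustrative config dict rather than the real optimizer_builder schema:

```python
import paddle
import paddle.optimizer as optimizer

paddle.enable_static()  # parameters=None is only valid in static-graph mode

# illustrative config, not the real optimizer_builder schema
optimizer_builder = {'optimizer': {'type': 'Momentum', 'momentum': 0.9}}
optim_params = dict(optimizer_builder['optimizer'])
optim_type = optim_params.pop('type')  # e.g. 'Momentum', 'Adam', ...
opt = getattr(optimizer, optim_type)(learning_rate=0.01, **optim_params)
print(type(opt).__name__)              # Momentum
```

The last hunk's switch to keyword arguments for pruner.set_excluded_layers is presumably defensive, matching the "fix asp" note in the commit message: the ASP API's positional parameter order changed across Paddle releases, and keywords keep the call unambiguous.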
paddleslim/quant/post_quant_hpo.py (view file @ 92874cc0)

```diff
@@ -83,7 +83,7 @@ class QuantConfig(object):
         """QuantConfig init"""
         self.executor = executor
         self.place = place
-        self.float_infer_model_path = float_infer_model_path
+        self.float_infer_model_path = float_infer_model_path.rstrip('/')
         self.quantize_model_path = quantize_model_path
         self.algo = algo,
         self.hist_percent = hist_percent,
```
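The one-line change normalizes the float model path before it is split apart later; os.path functions treat 'dir' and 'dir/' differently, which is presumably what the "fix hpo" note refers to. A quick illustration:

```python
import os

p = './float_model/'
print(os.path.basename(p))              # '' -- trailing slash yields an empty name
print(os.path.basename(p.rstrip('/')))  # 'float_model'
print(os.path.split(p.rstrip('/')))     # ('.', 'float_model')
```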
paddleslim/quant/reconstruction_quantization.py (view file @ 92874cc0)

```diff
@@ -25,7 +25,9 @@ from ..dist import merge
 from ..core.graph_wrapper import GraphWrapper
 from ..common import get_logger

-__all__ = ['ReconstructionQuantization', ]
+__all__ = ['ReconstructionQuantization', ]

 _logger = get_logger(__name__,
@@ -91,7 +93,8 @@ class ReconstructionQuantization(PostTrainingQuantization):
         batch_id = 0
         with utils.tqdm(
                 total=self._batch_nums,
-                bar_format='Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
+                bar_format='Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
                 ncols=80) as t:
             for data in self._data_loader():
                 self._executor.run(
@@ -111,7 +114,8 @@ class ReconstructionQuantization(PostTrainingQuantization):
         batch_id = 0
         with utils.tqdm(
                 total=self._batch_nums,
-                bar_format='Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
+                bar_format='Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}',
                 ncols=80) as t:
             for data in self._data_loader():
                 self._executor.run(
@@ -237,7 +241,7 @@ class ReconstructionQuanter(object):
                 return a batch every time.
             executor(paddle.static.Executor): The executor to load, run and save the
                 quantized model.
-            scope(fluid.Scope, optional): The scope of the program, use it to load
+            scope(static.Scope, optional): The scope of the program, use it to load
                 and save variables. If scope=None, get scope by global_scope().
             place(CPUPlace()|CUDAPlace(N)): This parameter represents
                 paddle run on which device.
@@ -385,8 +389,8 @@ class ReconstructionQuanter(object):
         with paddle.static.program_guard(tmp_program, startup_program):
             student_var = tmp_program.global_block().var(quant_op_out_name)
-            teacher_var = tmp_program.global_block().var("teacher_" + quant_op_out_name)
+            teacher_var = tmp_program.global_block().var("teacher_" + quant_op_out_name)
             total_loss, recon_loss, round_loss = loss_function.get_loss(
                 student_var,
                 teacher_var,
             )
@@ -471,7 +475,8 @@ class ReconstructionQuanter(object):
                 shape=weight.shape,
                 dtype=weight.dtype,
                 name=weight.name + ".alpha",
-                default_initializer=paddle.nn.initializer.Assign(self._alpha, ),
+                default_initializer=paddle.nn.initializer.Assign(self._alpha, ),
             )
             h_v = paddle.clip(
                 paddle.nn.functional.sigmoid(v) * (ZETA - GAMMA) + GAMMA,
@@ -483,13 +488,14 @@ class ReconstructionQuanter(object):
                     dtype=weight.dtype,
                     shape=weight.shape,
                     name=weight.name + '.scale',
-                    default_initializer=paddle.nn.initializer.Assign(scale, ))
+                    default_initializer=paddle.nn.initializer.Assign(scale, ))
             else:
                 scale_var = scale
             quantized_weight = _quant(weight_copy, scale_var)
-            floor_weight = (paddle.floor(quantized_weight) - quantized_weight).detach() + quantized_weight
+            floor_weight = (paddle.floor(quantized_weight) - quantized_weight).detach() + quantized_weight
             clip_weight = paddle.clip(floor_weight + h_v, -bnt, bnt)
             w = _dequant(clip_weight, scale_var)
             return w
@@ -525,8 +531,9 @@ class ReconstructionQuanter(object):
     def _insert_drop_quant_dequant(self):
         for op in self._graph.ops():
-            if op.type() in ['conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2']:
+            if op.type() in ['conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2']:
                 if op.type() in ['conv2d', 'depthwise_conv2d']:
                     if op.inputs("Filter")[0].name().startswith("teacher"):
                         break
@@ -670,8 +677,8 @@ class ReconstructionQuanter(object):
                         'X': var._var,
                         'Y': op.input('Y')[0] + '.qdrop',
                     }
-                elif _type == 'scale' and op.input('X')[0] == inputs.name + '.tmp':
+                elif _type == 'scale' and op.input('X')[0] == inputs.name + '.tmp':
                     _inputs = {'X': var._var}
                 else:
                     _inputs = {'X': op.input('X')[0] + '.qdrop'}
@@ -687,11 +694,13 @@ class ReconstructionQuanter(object):
                     'conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2'
             ]:
                 continue
-            if op.type() in ['conv2d', 'depthwise_conv2d'] and op.inputs('Filter')[0].name().startswith('teacher'):
+            if op.type() in ['conv2d', 'depthwise_conv2d'] and op.inputs('Filter')[0].name().startswith('teacher'):
                 continue
-            if op.type() in ['mul', 'matmul', 'matmul_v2'] and op.inputs('Y')[0].name().startswith('teacher'):
+            if op.type() in ['mul', 'matmul', 'matmul_v2'] and op.inputs('Y')[0].name().startswith('teacher'):
                 continue
             if func == '_soft_rounding':
                 op._op._rename_input(inputs.name, out.name + '.rounding')
@@ -964,8 +973,8 @@ class RegionBuilder(object):
             else:
                 future_ep = _find_multi_input_ep(ep)
-            if future_ep is None or self._depth[future_ep.idx()] - self._depth[sp.idx()] >= limit:
+            if future_ep is None or self._depth[future_ep.idx()] - self._depth[sp.idx()] >= limit:
                 return self._create_region(sp, ep)
             ep = future_ep
```
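The floor_weight line visible in the @@ -483 hunk is a straight-through estimator: the forward value is floor(quantized_weight), but the detached difference term is invisible to autograd, so gradients flow through the trailing quantized_weight as if the op were the identity. A standalone sketch:

```python
import paddle

q = paddle.to_tensor([1.7, 2.3], stop_gradient=False)
# forward: floor(q); backward: the detached term contributes nothing,
# so dq passes straight through the final `+ q`
st = (paddle.floor(q) - q).detach() + q
st.sum().backward()
print(st.numpy())      # [1. 2.]
print(q.grad.numpy())  # [1. 1.]
```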
tests/test_latency_predictor.py (view file @ 92874cc0)

```diff
@@ -147,10 +147,8 @@ class ModelCase6(paddle.nn.Layer):
         x = paddle.unsqueeze(x=x, axis=[2])
         x = self.relu1(x)
         y = paddle.full(shape=x.shape, fill_value=1)
-        # x = paddle.stack([x, y], axis=3)
         x = paddle.slice(x, axes=[0], starts=[0], ends=[1])
         x = paddle.exp(x)
-        # y += paddle.fluid.layers.uniform_random(y.shape)
         y = paddle.expand(y, shape=[1, 768, 768, 2])
         x = paddle.expand(x, shape=[1, 768, 768, 2])
         out = paddle.concat([x, y])
@@ -161,8 +159,8 @@ class ModelCase6(paddle.nn.Layer):
         max_idx = paddle.argmax(
             out1.reshape((outshape[0], outshape[1], outshape[2] * outshape[3])),
             axis=-1)
-        out2 = out2.reshape((outshape[0], outshape[1], outshape[2] * outshape[3]))
+        out2 = out2.reshape((outshape[0], outshape[1], outshape[2] * outshape[3]))
         res, _ = self.lstm(out2)
         return res, max_idx
@@ -238,8 +236,8 @@ class TestCase2(unittest.TestCase):
         model_name = '.'.join(model_filename.split('.')[:-1])
         model_path_prefix = os.path.join(model_dir, model_name)
-        [inference_program, feed_target_names, fetch_targets] = (paddle.static.load_inference_model(path_prefix=model_path_prefix, executor=exe))
+        [inference_program, feed_target_names, fetch_targets] = (paddle.static.load_inference_model(path_prefix=model_path_prefix, executor=exe))
        if type(input_shapes) in [list, tuple]:
```
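The TestCase2 hunk only re-wraps the paddle.static.load_inference_model call. For reference, a minimal save/load round trip with that API; the path, network, and names are illustrative:

```python
import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

main, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
    out = paddle.static.nn.fc(x, size=2)
exe.run(startup)

# path_prefix carries no extension; .pdmodel/.pdiparams are added on disk
paddle.static.save_inference_model('/tmp/demo_model', [x], [out], exe, program=main)
[program, feed_names, fetch_targets] = (
    paddle.static.load_inference_model(path_prefix='/tmp/demo_model', executor=exe))
print(feed_names)  # ['x']
```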