未验证 提交 3b19311d 编写于 作者: M MissPenguin 提交者: GitHub

Merge pull request #2678 from WenmuZhou/fix_srn_post_process

add max_text_length to export model
@@ -53,17 +53,19 @@ def main():
     save_path = '{}/inference'.format(config['Global']['save_inference_dir'])
     if config['Architecture']['algorithm'] == "SRN":
+        max_text_length = config['Architecture']['Head']['max_text_length']
         other_shape = [
             paddle.static.InputSpec(
                 shape=[None, 1, 64, 256], dtype='float32'), [
                 paddle.static.InputSpec(
                     shape=[None, 256, 1],
                     dtype="int64"), paddle.static.InputSpec(
-                    shape=[None, 25, 1],
-                    dtype="int64"), paddle.static.InputSpec(
-                    shape=[None, 8, 25, 25], dtype="int64"),
+                    shape=[None, max_text_length, 1], dtype="int64"),
                 paddle.static.InputSpec(
-                    shape=[None, 8, 25, 25], dtype="int64")
+                    shape=[None, 8, max_text_length, max_text_length],
+                    dtype="int64"), paddle.static.InputSpec(
+                    shape=[None, 8, max_text_length, max_text_length],
+                    dtype="int64")
             ]
         ]
         model = to_static(model, input_spec=other_shape)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册