Unverified commit 796aea86, authored by MissPenguin, committed by GitHub

Merge pull request #2633 from littletomatodonkey/dyg/cp_add_save_rec

add save rec res
@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: True
+  save_res_path: ./output/rec/predicts_chinese_common_v2.0.txt
 Optimizer:
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: True
+  save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt
 Optimizer:
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_ic15.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_mv3_none_bilstm_ctc.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_mv3_none_none_ctc.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_mv3_tps_bilstm_att.txt
 Optimizer:
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_mv3_tps_bilstm_ctc.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_r34_vd_none_bilstm_ctc.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_r34_vd_none_none_ctc.txt
 Optimizer:
   name: Adam
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_b3_rare_r34_none_gru.txt
 Optimizer:
...

@@ -19,6 +19,7 @@ Global:
   max_text_length: 25
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_r34_vd_tps_bilstm_ctc.txt
 Optimizer:
   name: Adam
...

@@ -20,6 +20,7 @@ Global:
   num_heads: 8
   infer_mode: False
   use_space_char: False
+  save_res_path: ./output/rec/predicts_srn.txt
 Optimizer:
...
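Note: every hunk above makes the same change, adding a save_res_path key under the Global section of a recognition config and pointing it at a per-model prediction file under ./output/rec/. As a minimal sketch (assuming PyYAML is available; the inline YAML below is illustrative, not one of the files touched by this PR), this is how the key can be read back with the same default that tools/infer_rec.py falls back to in the last hunk below:

import yaml  # assumption: PyYAML is installed

# Illustrative config fragment mirroring the hunks above; not a file from this PR.
config_text = """
Global:
  max_text_length: 25
  infer_mode: False
  use_space_char: True
  save_res_path: ./output/rec/predicts_chinese_common_v2.0.txt
"""

config = yaml.safe_load(config_text)
# Same lookup-with-default that tools/infer_rec.py performs.
save_res_path = config['Global'].get('save_res_path',
                                     "./output/rec/predicts_rec.txt")
print(save_res_path)  # -> ./output/rec/predicts_chinese_common_v2.0.txt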
@@ -16,6 +16,7 @@ import paddlehub as hub
 from tools.infer.utility import base64_to_cv2
 from tools.infer.predict_cls import TextClassifier
 from tools.infer.utility import parse_args
+from deploy.hubserving.ocr_cls.params import read_params
 @moduleinfo(
...

@@ -55,7 +56,6 @@ class OCRCls(hub.Module):
         sys.argv = sys.argv[:1]
         cfg = parse_args()
-        from ocr_det.params import read_params
         update_cfg_map = vars(read_params())
         for key in update_cfg_map:
...
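The hubserving change above promotes read_params from a function-local import (previously pulled from ocr_det.params) to a module-level import of deploy.hubserving.ocr_cls.params, and keeps the existing pattern of copying every attribute of the returned params object onto the parsed config. A generic, self-contained sketch of that vars()-based merge follows; the names and values are stand-ins, and the loop body is collapsed in the diff, so the setattr step is an assumption:

from types import SimpleNamespace

def read_params():
    # Stand-in for deploy.hubserving.ocr_cls.params.read_params; values are made up.
    return SimpleNamespace(use_gpu=False, cls_batch_num=30)

# Stand-in for the cfg returned by tools.infer.utility.parse_args().
cfg = SimpleNamespace(use_gpu=True, cls_batch_num=6, cls_thresh=0.9)

update_cfg_map = vars(read_params())
for key in update_cfg_map:
    # Assumed merge step; the real loop body is not shown in the hunk above.
    setattr(cfg, key, update_cfg_map[key])

print(cfg)  # namespace(use_gpu=False, cls_batch_num=30, cls_thresh=0.9)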
@@ -73,35 +73,45 @@ def main():
     global_config['infer_mode'] = True
     ops = create_operators(transforms, global_config)

+    save_res_path = config['Global'].get('save_res_path',
+                                         "./output/rec/predicts_rec.txt")
+    if not os.path.exists(os.path.dirname(save_res_path)):
+        os.makedirs(os.path.dirname(save_res_path))
+
     model.eval()
-    for file in get_image_file_list(config['Global']['infer_img']):
-        logger.info("infer_img: {}".format(file))
-        with open(file, 'rb') as f:
-            img = f.read()
-            data = {'image': img}
-        batch = transform(data, ops)
-        if config['Architecture']['algorithm'] == "SRN":
-            encoder_word_pos_list = np.expand_dims(batch[1], axis=0)
-            gsrm_word_pos_list = np.expand_dims(batch[2], axis=0)
-            gsrm_slf_attn_bias1_list = np.expand_dims(batch[3], axis=0)
-            gsrm_slf_attn_bias2_list = np.expand_dims(batch[4], axis=0)
-
-            others = [
-                paddle.to_tensor(encoder_word_pos_list),
-                paddle.to_tensor(gsrm_word_pos_list),
-                paddle.to_tensor(gsrm_slf_attn_bias1_list),
-                paddle.to_tensor(gsrm_slf_attn_bias2_list)
-            ]
-
-        images = np.expand_dims(batch[0], axis=0)
-        images = paddle.to_tensor(images)
-        if config['Architecture']['algorithm'] == "SRN":
-            preds = model(images, others)
-        else:
-            preds = model(images)
-        post_result = post_process_class(preds)
-        for rec_reuslt in post_result:
-            logger.info('\t result: {}'.format(rec_reuslt))
+
+    with open(save_res_path, "w") as fout:
+        for file in get_image_file_list(config['Global']['infer_img']):
+            logger.info("infer_img: {}".format(file))
+            with open(file, 'rb') as f:
+                img = f.read()
+                data = {'image': img}
+            batch = transform(data, ops)
+            if config['Architecture']['algorithm'] == "SRN":
+                encoder_word_pos_list = np.expand_dims(batch[1], axis=0)
+                gsrm_word_pos_list = np.expand_dims(batch[2], axis=0)
+                gsrm_slf_attn_bias1_list = np.expand_dims(batch[3], axis=0)
+                gsrm_slf_attn_bias2_list = np.expand_dims(batch[4], axis=0)
+
+                others = [
+                    paddle.to_tensor(encoder_word_pos_list),
+                    paddle.to_tensor(gsrm_word_pos_list),
+                    paddle.to_tensor(gsrm_slf_attn_bias1_list),
+                    paddle.to_tensor(gsrm_slf_attn_bias2_list)
+                ]
+
+            images = np.expand_dims(batch[0], axis=0)
+            images = paddle.to_tensor(images)
+            if config['Architecture']['algorithm'] == "SRN":
+                preds = model(images, others)
+            else:
+                preds = model(images)
+            post_result = post_process_class(preds)
+            for rec_reuslt in post_result:
+                logger.info('\t result: {}'.format(rec_reuslt))
+                if len(rec_reuslt) >= 2:
+                    fout.write(file + "\t" + rec_reuslt[0] + "\t" + str(
+                        rec_reuslt[1]) + "\n")
     logger.info("success!")
...
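With this change, tools/infer_rec.py writes one tab-separated line per image to save_res_path: the image path, the decoded text, and the score (only results with at least two elements, i.e. (text, score) pairs, are written). A small, hypothetical consumer of that file follows; the path is just the default from the code above, and the parsing assumes neither the image path nor the predicted text contains a tab:

# Hypothetical reader for the predictions file written above; not part of the PR.
res_path = "./output/rec/predicts_rec.txt"  # default save_res_path

with open(res_path, "r") as fin:
    for line in fin:
        # Each line: <image path>\t<predicted text>\t<score>
        img_path, text, score = line.rstrip("\n").split("\t")
        print(img_path, text, float(score))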