diff --git a/core/general-server/op/general_rec_op.cpp b/core/general-server/op/general_rec_op.cpp
index 7f850071cf7179ecd2e4ed3f783e29eed30ae12f..3b77510a6ed1759511791ebc2bb6f038be71a497 100644
--- a/core/general-server/op/general_rec_op.cpp
+++ b/core/general-server/op/general_rec_op.cpp
@@ -14,11 +14,11 @@
 #include "core/general-server/op/general_rec_op.h"
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
diff --git a/examples/C++/PaddleOCR/ocr/ocr_cpp_client.py b/examples/C++/PaddleOCR/ocr/ocr_cpp_client.py
index aba8f7bbf2365bb251d043aa5628e0118785ea5d..b3187f50e6f0d677d5377dac1735bd6e679b4755 100644
--- a/examples/C++/PaddleOCR/ocr/ocr_cpp_client.py
+++ b/examples/C++/PaddleOCR/ocr/ocr_cpp_client.py
@@ -21,6 +21,7 @@ import os
 import cv2
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
+from paddle_serving_app.reader import OCRReader
 
 client = Client()
 # TODO:load_client need to load more than one client model.
@@ -44,5 +45,13 @@ for img_file in os.listdir(test_img_dir):
         feed={"image": image},
         fetch=["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"],
         batch=True)
-    #print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
-    print(fetch_map)
+    result = {}
+    result["score"] = fetch_map["softmax_0.tmp_0"]
+    del fetch_map["softmax_0.tmp_0"]
+    rec_res = OCRReader().postprocess(fetch_map, with_score=False)
+    res_lst = []
+    for res in rec_res:
+        res_lst.append(res[0])
+    result["res"] = res_lst
+
+    print(result)
diff --git a/examples/C++/xpu/bert/README.md b/examples/C++/xpu/bert/README.md
index 676ac361dde376b29ae73cae98bd34a28f55d469..1cafe07a150eadab200d423ba8a826e45b456278 100644
--- a/examples/C++/xpu/bert/README.md
+++ b/examples/C++/xpu/bert/README.md
@@ -7,7 +7,7 @@ tar zxvf bert_base_chinese.tar.gz
 ```
 ### convert model
 ```
-python3 -m paddle_serving_client.convert --dirname bert_base_chinese --model_filename bert_base_chinese/model.pdmodel --params_filename bert_base_chinese/model.pdiparams
+python3 -m paddle_serving_client.convert --dirname bert_base_chinese --model_filename model.pdmodel --params_filename model.pdiparams
 ```
 ### or, you can get the serving saved model directly
 ```
diff --git a/python/paddle_serving_app/reader/batching.py b/python/paddle_serving_app/reader/batching.py
index 5ec5f320cf5ec7bd0ab4624d9b39ef936553c774..4cc85b29bf39d4ea0ed15311cb6c3c6b7dd05fe1 100644
--- a/python/paddle_serving_app/reader/batching.py
+++ b/python/paddle_serving_app/reader/batching.py
@@ -108,7 +108,7 @@ def pad_batch_data(insts,
         input_mask_data = np.array(
             [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
         input_mask_data = np.expand_dims(input_mask_data, axis=-1)
-        return_list += [input_mask_data.astype("float32")]
+        return_list += [input_mask_data]
 
     if return_max_len:
         return_list += [max_len]