Unverified commit 3f4e5ee8, authored by huangjianhui, committed by GitHub

Merge branch 'develop' into develop

@@ -14,11 +14,11 @@
#include "core/general-server/op/general_rec_op.h"
#include <algorithm>
#include <functional>
#include <iostream>
#include <memory>
#include <sstream>
#include <vector>
#include <functional>
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
......
@@ -21,6 +21,7 @@ import os
import cv2
from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import OCRReader
client = Client()
# TODO:load_client need to load more than one client model.
@@ -44,5 +45,13 @@ for img_file in os.listdir(test_img_dir):
feed={"image": image},
fetch=["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"],
batch=True)
#print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
print(fetch_map)
result = {}
result["score"] = fetch_map["softmax_0.tmp_0"]
del fetch_map["softmax_0.tmp_0"]
rec_res = OCRReader().postprocess(fetch_map, with_score=False)
res_lst = []
for res in rec_res:
res_lst.append(res[0])
result["res"] = res_lst
print(result)
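
The updated client no longer just prints the raw fetch_map: it keeps the softmax probabilities under "score" and lets OCRReader.postprocess decode the CTC output into text. Read as a standalone helper it is roughly the following (a sketch only; parse_rec_result is a hypothetical name, and the fetch keys are the ones used above):

```
# Sketch of the client-side postprocessing shown above; parse_rec_result is a
# hypothetical helper name, the fetch keys are the ones used in the diff.
from paddle_serving_app.reader import OCRReader

def parse_rec_result(fetch_map):
    result = {}
    # keep the raw per-character probabilities, then drop them from the map
    result["score"] = fetch_map.pop("softmax_0.tmp_0")
    # decode the CTC greedy-decoder output; with_score=False yields entries
    # whose first element is the recognized string
    rec_res = OCRReader().postprocess(fetch_map, with_score=False)
    result["res"] = [res[0] for res in rec_res]
    return result
```

result["res"] then holds one recognized string per sample in the batch.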
@@ -7,7 +7,7 @@ tar zxvf bert_base_chinese.tar.gz
```
### convert model
```
python3 -m paddle_serving_client.convert --dirname bert_base_chinese --model_filename bert_base_chinese/model.pdmodel --params_filename bert_base_chinese/model.pdiparams
python3 -m paddle_serving_client.convert --dirname bert_base_chinese --model_filename model.pdmodel --params_filename model.pdiparams
```
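
The convert entry point can also be driven from Python through paddle_serving_client.io; a minimal sketch of the equivalent call is below (the serving_server/serving_client output directory names are assumed to match the tool's defaults):

```
# Equivalent conversion through the Python API (a sketch; the output
# directory names below are assumed to be the convert tool's defaults).
import paddle_serving_client.io as serving_io

serving_io.inference_model_to_serving(
    dirname="bert_base_chinese",        # directory containing the inference model
    model_filename="model.pdmodel",     # model file inside that directory
    params_filename="model.pdiparams",  # params file inside that directory
    serving_server="serving_server",    # output: server-side model and config
    serving_client="serving_client")    # output: client-side config
```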
### or, you can get the serving saved model directly
```
......
@@ -108,7 +108,7 @@ def pad_batch_data(insts,
input_mask_data = np.array(
[[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
input_mask_data = np.expand_dims(input_mask_data, axis=-1)
return_list += [input_mask_data.astype("float32")]
return_list += [input_mask_data]
if return_max_len:
return_list += [max_len]
......
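
Whichever way the mask is finally cast, input_mask_data is 1 over real tokens and 0 over padding, with a trailing singleton axis added by expand_dims. A toy illustration with made-up token ids:

```
# Toy illustration of the input mask built in pad_batch_data
# (the token ids are made up; only the shapes and 0/1 pattern matter).
import numpy as np

insts = [[101, 2769, 102], [101]]           # two sequences, lengths 3 and 1
max_len = max(len(inst) for inst in insts)  # 3

input_mask_data = np.array(
    [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
input_mask_data = np.expand_dims(input_mask_data, axis=-1)

print(input_mask_data.shape)                # (2, 3, 1)
print(input_mask_data[:, :, 0])             # [[1 1 1]
                                            #  [1 0 0]]
print(input_mask_data.astype("float32").dtype)  # float32
```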