From 29ccd204b3b81c472b508045bb11d26473ed0a3e Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Mon, 13 Jun 2022 17:06:07 +0800
Subject: [PATCH] add cpp serving infer

---
 .../recognition/test_cpp_serving_client.py    |  3 +-
 .../paddleserving/test_cpp_serving_client.py  | 47 +++++++++----------
 ...ormal_normal_serving_cpp_linux_gpu_cpu.txt | 18 +++++++
 test_tipc/test_serving_infer.sh               | 46 ++++++++++--------
 4 files changed, 69 insertions(+), 45 deletions(-)
 create mode 100644 test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt

diff --git a/deploy/paddleserving/recognition/test_cpp_serving_client.py b/deploy/paddleserving/recognition/test_cpp_serving_client.py
index 60412f6e..fa35de85 100644
--- a/deploy/paddleserving/recognition/test_cpp_serving_client.py
+++ b/deploy/paddleserving/recognition/test_cpp_serving_client.py
@@ -113,10 +113,9 @@ if __name__ == "__main__":
             "im_shape": im_shape},
         fetch=["features", "boxes"],
         batch=False)
-    print(fetch_map.keys())
+    #add retrieval procedure
     det_boxes = fetch_map["boxes"]
-    print(det_boxes)
     searcher, id_map = init_index(index_dir)
     results = postprocess(fetch_map, feature_normalize, det_boxes, searcher,
                           id_map, return_k, rec_score_thres, rec_nms_thresold)

diff --git a/deploy/paddleserving/test_cpp_serving_client.py b/deploy/paddleserving/test_cpp_serving_client.py
index 50794b36..ba5399c9 100644
--- a/deploy/paddleserving/test_cpp_serving_client.py
+++ b/deploy/paddleserving/test_cpp_serving_client.py
@@ -12,16 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import sys
+import base64
+import time
+
 from paddle_serving_client import Client
-#app
-from paddle_serving_app.reader import Sequential, URL2Image, Resize
-from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize
-import time
+
+
+def bytes_to_base64(image: bytes) -> str:
+    """encode bytes into base64 string
+    """
+    return base64.b64encode(image).decode('utf8')
+

 client = Client()
-client.load_client_config("./ResNet50_vd_serving/serving_server_conf.prototxt")
+client.load_client_config("./ResNet50_client/serving_client_conf.prototxt")
 client.connect(["127.0.0.1:9292"])

 label_dict = {}
@@ -31,22 +35,17 @@ with open("imagenet.label") as fin:
         label_dict[label_idx] = line.strip()
         label_idx += 1

-#preprocess
-seq = Sequential([
-    URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
-    Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
-])
-
-start = time.time()
-image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"
+image_file = "./daisy.jpg"
 for i in range(1):
-    img = seq(image_file)
-    fetch_map = client.predict(
-        feed={"inputs": img}, fetch=["prediction"], batch=False)
-
-    prob = max(fetch_map["prediction"][0])
-    label = label_dict[fetch_map["prediction"][0].tolist().index(prob)].strip(
-    ).replace(",", "")
-    print("prediction: {}, probability: {}".format(label, prob))
-end = time.time()
-print(end - start)
+    start = time.time()
+    with open(image_file, 'rb') as img_file:
+        image_data = img_file.read()
+    image = bytes_to_base64(image_data)
+    fetch_dict = client.predict(
+        feed={"inputs": image}, fetch=["prediction"], batch=False)
+    prob = max(fetch_dict["prediction"][0])
+    label = label_dict[fetch_dict["prediction"][0].tolist().index(
+        prob)].strip().replace(",", "")
+    print("prediction: {}, probability: {}".format(label, prob))
+    end = time.time()
+    print(end - start)
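Note on the rewritten classification client above: it no longer preprocesses the
image in Python. It reads the raw file and ships it as a base64 string, and the
C++ server decodes and preprocesses it (the matching feed_type: 20 rewrite
appears in test_tipc/test_serving_infer.sh below). A minimal round-trip sketch
of that encoding contract, assuming GNU coreutils base64 and a local daisy.jpg
(both assumptions, not part of the patch):

    # client side: raw image bytes -> base64 text, same as bytes_to_base64() above
    base64 -w 0 daisy.jpg > daisy.b64
    # server side: base64 text -> the identical raw bytes; cmp prints nothing on a match
    base64 -d daisy.b64 | cmp - daisy.jpg
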
diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 00000000..5bf0e4c1
--- /dev/null
+++ b/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:PPShiTu
+python:python3.7
+cls_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+det_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./models/general_PPLCNet_x2_5_lite_v1.0_infer/
+--dirname:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./models/general_PPLCNet_x2_5_lite_v1.0_serving/
+--serving_client:./models/general_PPLCNet_x2_5_lite_v1.0_client/
+--serving_server:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/
+--serving_client:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
+serving_dir:./paddleserving/recognition
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
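The trans_model block in this config is not runnable as written; the TIPC
harness stitches each key:value pair into a paddle_serving_client.convert
call, one per model. A hedged expansion for the cls model (the det model
follows the same pattern with its picodet paths):

    python3.7 -m paddle_serving_client.convert \
        --dirname ./models/general_PPLCNet_x2_5_lite_v1.0_infer/ \
        --model_filename inference.pdmodel \
        --params_filename inference.pdiparams \
        --serving_server ./models/general_PPLCNet_x2_5_lite_v1.0_serving/ \
        --serving_client ./models/general_PPLCNet_x2_5_lite_v1.0_client/
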
diff --git a/test_tipc/test_serving_infer.sh b/test_tipc/test_serving_infer.sh
index 002d12a7..0486e4d9 100644
--- a/test_tipc/test_serving_infer.sh
+++ b/test_tipc/test_serving_infer.sh
@@ -84,16 +84,16 @@ function func_serving_cls(){
             break
         fi
     done
-    set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_value}/serving_client_conf.prototxt"
+    serving_client_dir_name=$(func_get_url_file_name "$serving_client_value")
+    set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_dir_name}/serving_client_conf.prototxt"
     eval ${set_client_feed_type_cmd}
-    set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_value}/serving_client_conf.prototxt"
+    set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_dir_name}/serving_client_conf.prototxt"
     eval ${set_client_shape_cmd}
-    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_value}/serving_client_conf.prototxt"
+    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt"
     eval ${set_client_shape224_cmd}
-    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_value}/serving_client_conf.prototxt"
+    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt"
     eval ${set_client_shape224_cmd}
-    serving_client_dir_name=$(func_get_url_file_name "$serving_client_value")
     set_pipeline_load_config_cmd="sed -i '/load_client_config/,/.prototxt/s/.\/.*\/serving_client_conf.prototxt/.\/${serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
     eval ${set_pipeline_load_config_cmd}
@@ -105,10 +105,12 @@ function func_serving_cls(){
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [[ ${use_gpu} = "null" ]]; then
             web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
+            # echo ${web_service_cpp_cmd}
             eval ${web_service_cpp_cmd}
             sleep 5s
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
+            echo ${pipeline_cmd}
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
@@ -122,8 +124,7 @@ function func_serving_cls(){

             _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
-
-            eval $pipeline_cmd
+            eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
             status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
@@ -245,7 +246,6 @@ function func_serving_rec(){
     done

     # pdserving
-    export SERVING_BIN=$PWD/Serving/server-build-gpu-opencv/core/general-server/serving
     cd ./deploy
     set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
@@ -263,15 +263,22 @@ function func_serving_rec(){
     det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
     eval $det_trans_model_cmd

-    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
-    eval ${cp_prototxt_cmd}
-    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}"
-    eval ${cp_prototxt_cmd}
-    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}"
-    eval ${cp_prototxt_cmd}
-    cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}"
-    eval ${cp_prototxt_cmd}
-
+    if [[ ${FILENAME} =~ "cpp" ]]; then
+        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}"
+        eval ${cp_prototxt_cmd}
+        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}"
+        eval ${cp_prototxt_cmd}
+        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}"
+        eval ${cp_prototxt_cmd}
+        cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}"
+        eval ${cp_prototxt_cmd}
+    else
+        # modify the alias_name of fetch_var to "features"
+        server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
+        eval ${server_fetch_var_line_cmd}
+        client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt"
+        eval ${client_fetch_var_line_cmd}
+    fi
     prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${cls_serving_server_value}/serving_server_conf.prototxt)
     IFS=$'\n'
     prototxt_lines=(${prototxt_dataline})
@@ -283,6 +290,7 @@ function func_serving_rec(){
     unset http_proxy
     if [[ ${FILENAME} =~ "cpp" ]]; then
+        export SERVING_BIN=$PWD/../Serving/server-build-gpu-opencv/core/general-server/serving
         for use_gpu in ${web_use_gpu_list[*]}; do
             if [ ${use_gpu} = "null" ]; then
                 det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
@@ -290,7 +298,7 @@ function func_serving_rec(){
                 eval $web_service_cpp_cmd
                 sleep 5s
                 _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
-                pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
+                pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
@@ -303,7 +311,7 @@ function func_serving_rec(){
                 eval $web_service_cpp_cmd
                 sleep 5s
                 _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
-                pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
+                pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
--
GitLab
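For completeness, a hedged sketch of how the new config is exercised; the
"serving_infer" mode name follows the usual TIPC convention and is an
assumption, not something stated in this patch:

    # download and convert the inference models listed in the config (assumed prepare step)
    bash test_tipc/prepare.sh test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
    # run the C++ serving test driven by test_serving_infer.sh above
    bash test_tipc/test_serving_infer.sh test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer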