Unverified commit 49e57e8c, authored by TeslaZhao, committed by GitHub

Merge pull request #1573 from felixhjh/develop

add Paddle inference test
import pytest
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
@@ -25,7 +24,10 @@ Usage: export PYTHON_EXECUTABLE=/usr/local/bin/python3.6
import sys
import os
import pytest
inference_test_cases = ["test_fit_a_line.py::TestFitALine::test_inference"]
cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]
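Each entry above is a pytest node ID of the form file.py::Class::test, so a single environment check can also be run on its own; a minimal sketch, assuming the test files are on the current path:

import pytest

# select a single check by its node ID, exactly as run_test_cases does below
pytest.main(["test_fit_a_line.py::TestFitALine::test_inference", "--disable-warnings"])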
@@ -39,10 +41,15 @@ def run_test_cases(cases_list, case_type):
args = args_str.split(" ")
res = pytest.main(args)
sys.stdout, sys.stderr = old_stdout, old_stderr
case_name = case.split('_')[-1]
if res == 0:
print("{} {} environment running success".format(case_type, case[-3:]))
print("{} {} environment running success".format(case_type, case_name))
elif res == 1:
if case_name == "inference":
print("{} {} environment running failure. Please refer to https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html to configure environment".format(case_type, case_name))
os._exit(0)
else:
print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md to configure environment".format(case_type, case[-3:]))
print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/HEAD/doc/Compile_CN.md to configure environment".format(case_type, case_name))
def unset_proxy(key):
os.unsetenv(key)
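One caveat: os.unsetenv removes the variable from the process environment but does not update the os.environ mapping, so later reads of os.environ can still see the stale value. A sketch of an equivalent that keeps both in sync (deleting a key from os.environ calls unsetenv automatically):

import os

def unset_proxy(key):
    # pop updates os.environ and the real process environment together
    os.environ.pop(key, None)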
@@ -51,5 +58,6 @@ def check_env():
if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
unset_proxy("https_proxy")
unset_proxy("http_proxy")
run_test_cases(inference_test_cases, "PaddlePaddle")
run_test_cases(cpp_test_cases, "C++")
run_test_cases(pipeline_test_cases, "Pipeline")
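Combined with the narrowed import in serve.py further down, the whole check is a one-liner to invoke; a hypothetical usage sketch:

from paddle_serving_server.env_check.run import check_env

check_env()  # runs the PaddlePaddle, C++ and Pipeline environment checks in order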
@@ -8,10 +8,7 @@ import sys
from paddle_serving_client import Client
from paddle_serving_client.httpclient import HttpClient
from paddle_serving_client.io import inference_model_to_serving
from paddle_serving_app.reader import SegPostprocess
from paddle_serving_app.reader import *
import paddle.inference as paddle_infer
from util import *
@@ -31,6 +28,11 @@ class TestFitALine(object):
self.serving_util.release()
def get_truth_val_by_inference(self):
try:
import paddle.inference as paddle_infer
except ImportError:
# paddle is not installed, return directly
return
data = np.array(
[0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795,
-0.0332]).astype("float32")[np.newaxis, :]
@@ -55,7 +57,7 @@ class TestFitALine(object):
output_handle = predictor.get_output_handle(output_data_name)
output_data = output_handle.copy_to_cpu()
output_data_dict[output_data_name] = output_data
# align with the Serving output
# convert to the same format of Serving output
print(output_data_dict)
output_data_dict["price"] = output_data_dict["fc_0.tmp_1"]
del output_data_dict["fc_0.tmp_1"]
@@ -86,7 +88,12 @@ class TestFitALine(object):
fetch_map = client.predict(
feed={"x": data}, fetch=fetch_list, batch=True)
print(fetch_map)
return fetch_map
output_dict = self.serving_util.parse_http_result(fetch_map)
return output_dict
def test_inference(self):
assert self.truth_val['price'].size != 0
def test_cpu(self):
# 1.start server
@@ -97,9 +104,6 @@ class TestFitALine(object):
# 2.resource check
assert count_process_num_on_port(9494) == 1
# assert check_gpu_memory(0) is False
# 3.keywords check
# 4.predict by brpc
# batch_size 1
@@ -120,9 +124,6 @@ class TestFitALine(object):
# 2.resource check
assert count_process_num_on_port(9494) == 1
# assert check_gpu_memory(0) is False
# 3.keywords check
# 4.predict by brpc
# batch_size 1
@@ -134,8 +135,4 @@ class TestFitALine(object):
# 5.release
kill_process(9494)
if __name__ == '__main__':
sss = TestCPPClient()
sss.get_truth_val_by_inference()
@@ -10,7 +10,6 @@ import sys
from paddle_serving_server.pipeline import PipelineClient
from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize, RCNNPostprocess
from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
import paddle.inference as paddle_infer
from util import *
@@ -30,6 +29,11 @@ class TestUCIPipeline(object):
self.serving_util.release()
def get_truth_val_by_inference(self):
try:
import paddle.inference as paddle_infer
except ImportError:
# paddle is not installed, return directly
return
data = np.array(
[0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795,
-0.0332]).astype("float32")[np.newaxis, :]
@@ -54,7 +58,7 @@ class TestUCIPipeline(object):
output_handle = predictor.get_output_handle(output_data_name)
output_data = output_handle.copy_to_cpu()
output_data_dict[output_data_name] = output_data
# align with the Serving output
# convert to the same format of Serving output
output_data_dict["prob"] = output_data_dict["fc_0.tmp_1"]
del output_data_dict["fc_0.tmp_1"]
self.truth_val = output_data_dict
@@ -63,17 +67,15 @@ class TestUCIPipeline(object):
def predict_pipeline_rpc(self, batch_size=1):
# 1.prepare feed_data
feed_dict = {'x': '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332'}
# TODO: the original demo does not support batch
# 2.init client
# fetch = ["label", "prob"]
client = PipelineClient()
client.connect(['127.0.0.1:9998'])
# 3.predict for fetch_map
ret = client.predict(feed_dict=feed_dict)
print(ret)
# convert to dict
# 4.convert dict to numpy
result = {"prob": np.array(eval(ret.value[0]))}
print(result)
return result
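The gateway serializes tensor values as strings, which is why the code above rebuilds them with eval. A slightly safer sketch using ast.literal_eval, which accepts only Python literals:

import ast
import numpy as np

raw = "[18.901152]"  # the shape of a ret.value[0] string (value is illustrative)
result = {"prob": np.array(ast.literal_eval(raw))}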
@@ -83,7 +85,6 @@ class TestUCIPipeline(object):
data = '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, ' \
'-0.0332'
feed_dict = {"key": [], "value": []}
# TODO: the original demo does not support batch
feed_dict["key"].append("x")
feed_dict["value"].append(data)
@@ -91,7 +92,7 @@ class TestUCIPipeline(object):
url = "http://127.0.0.1:18082/uci/prediction"
r = requests.post(url=url, data=json.dumps(feed_dict))
print(r.json())
# convert to a dict of numpy arrays
# 3.convert dict to numpy array
result = {"prob": np.array(eval(r.json()["value"][0]))}
return result
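The same prediction can be reproduced outside the test with a bare HTTP call; a sketch assuming the pipeline server started by this test is already listening on port 18082:

import json
import requests

data = "0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332"
feed_dict = {"key": ["x"], "value": [data]}
r = requests.post(url="http://127.0.0.1:18082/uci/prediction", data=json.dumps(feed_dict))
print(r.json()["value"][0])  # a stringified array such as "[18.901152]"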
@@ -104,21 +105,19 @@ class TestUCIPipeline(object):
# 2.resource check
assert count_process_num_on_port(9998) == 1 # gRPC Server
assert count_process_num_on_port(18082) == 1 # gRPC gateway, proxying and forwarding
#assert check_gpu_memory(0) is False
assert count_process_num_on_port(18082) == 1 # gRPC gateway
# 3.keywords check
check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")
# 4.predict by rpc
# batch_size=1
result = self.predict_pipeline_rpc(batch_size=1)
self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
# # predict by http
result = self.predict_pipeline_http(batch_size=1) # batch_size=1
# 5.predict by http
result = self.predict_pipeline_http(batch_size=1)
self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
# 5.release
# 6.release
kill_process(9998)
kill_process(18082)
@@ -132,15 +131,13 @@ class TestUCIPipeline(object):
# 2.resource check
assert count_process_num_on_port(9998) == 1 # gRPC Server
assert count_process_num_on_port(18082) == 1 # gRPC gateway, proxying and forwarding
#assert check_gpu_memory(0) is False
assert count_process_num_on_port(18082) == 1 # gRPC gateway
# 4.predict by rpc
# batch_size=1
# 3.predict by rpc
result = self.predict_pipeline_rpc(batch_size=1)
self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
# # predict by http
result = self.predict_pipeline_http(batch_size=1) # batch_size=1
# 4.predict by http
result = self.predict_pipeline_http(batch_size=1)
self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
# 5.release
......
@@ -5,7 +5,6 @@ import base64
import subprocess
import numpy as np
class ServingTest(object):
def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir: str):
"""
@@ -56,8 +55,6 @@ class ServingTest(object):
predict_result[key] = value.flatten()
for key, value in truth_data.items():
truth_result[key] = np.repeat(value, repeats=batch_size, axis=0).flatten()
# print("预测值:", predict_result)
# print("真实值:", truth_result)
# compare
for key in predict_result.keys():
@@ -65,10 +62,6 @@ class ServingTest(object):
diff_count = np.sum(diff_array > delta)
assert diff_count == 0, f"total: {np.size(diff_array)} diff count:{diff_count} max:{np.max(diff_array)}"
# for key in predict_result.keys():
# for i, data in enumerate(predict_result[key]):
# diff = sig_fig_compare(data, truth_result[key][i])
# assert diff < delta, f"data:{data} truth:{truth_result[key][i]} diff is {diff} > {delta}, index:{i}"
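A self-contained sketch of the comparison performed above: predictions are flattened, truth values are repeated to the predicted batch size, and the element-wise absolute difference must stay within delta (the tolerance here is an assumption; the real default lives in check_result's signature):

import numpy as np

delta = 1e-3
predict = np.array([18.9011, 18.9011])                     # flattened predictions, batch_size=2
truth = np.repeat(np.array([18.9012]), repeats=2, axis=0)  # truth tiled to the batch

diff_array = np.abs(predict - truth)
assert np.sum(diff_array > delta) == 0, f"max diff: {np.max(diff_array)}"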
@staticmethod
def parse_http_result(output):
@@ -85,7 +78,6 @@ class ServingTest(object):
@staticmethod
def release(keywords="web_service.py"):
#os.system("kill -9 $(ps -ef | grep serving | awk '{print $2}') > /dev/null 2>&1")
os.system("kill -9 $(ps -ef | grep " + keywords + " | awk '{print $2}') > /dev/null 2>&1")
......
@@ -34,7 +34,7 @@ import socket
from paddle_serving_server.env import CONF_HOME
import signal
from paddle_serving_server.util import *
from paddle_serving_server.env_check.run import *
from paddle_serving_server.env_check.run import check_env
# web_service.py is still used by Pipeline.
......
@@ -21,4 +21,3 @@ sentencepiece; platform_machine == "aarch64"
opencv-python==4.2.0.32; platform_machine != "aarch64"
opencv-python; platform_machine == "aarch64"
pytest
pynvml
@@ -34,7 +34,7 @@ util.gen_pipeline_code("paddle_serving_server")
REQUIRED_PACKAGES = [
'six >= 1.10.0', 'protobuf >= 3.11.0', 'grpcio <= 1.33.2', 'grpcio-tools <= 1.33.2',
'flask >= 1.1.1,<2.0.0', 'click==7.1.2', 'itsdangerous==1.1.0', 'Jinja2==2.11.3',
'MarkupSafe==1.1.1', 'Werkzeug==1.0.1', 'func_timeout', 'pyyaml'
'MarkupSafe==1.1.1', 'Werkzeug==1.0.1', 'func_timeout', 'pyyaml', 'pytest'
]
packages=['paddle_serving_server',
......