diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py
index 8a304d7889b34b5ed139fde478f186fbbb74a833..6b295554757ed3478bb8a485582a7b43af94b235 100644
--- a/python/paddle_serving_server/env_check/run.py
+++ b/python/paddle_serving_server/env_check/run.py
@@ -1,4 +1,3 @@
-import pytest
 # coding:utf-8
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
@@ -25,7 +24,10 @@ Usage:
     export PYTHON_EXECUTABLE=/usr/local/bin/python3.6
 import sys
 import os
+import pytest
+
+inference_test_cases = ["test_fit_a_line.py::TestFitALine::test_inference"]
 cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
 pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]
@@ -39,11 +41,16 @@ def run_test_cases(cases_list, case_type):
         args = args_str.split(" ")
         res = pytest.main(args)
         sys.stdout, sys.stderr = old_stdout, old_stderr
+        case_name = case.split('_')[-1]
         if res == 0:
-            print("{} {} environment running success".format(case_type, case[-3:]))
-        else:
-            print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md to configure environment".format(case_type, case[-3:]))
-
+            print("{} {} environment running success".format(case_type, case_name))
+        elif res == 1:
+            if case_name == "inference":
+                print("{} {} environment running failure. Please refer to https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html to configure environment".format(case_type, case_name))
+                os._exit(0)
+            else:
+                print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/HEAD/doc/Compile_CN.md to configure environment".format(case_type, case_name))
+
 
 def unset_proxy(key):
     os.unsetenv(key)
@@ -51,5 +58,6 @@ def check_env():
     if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
         unset_proxy("https_proxy")
         unset_proxy("http_proxy")
+    run_test_cases(inference_test_cases, "PaddlePaddle")
     run_test_cases(cpp_test_cases, "C++")
     run_test_cases(pipeline_test_cases, "Pipeline")
diff --git a/python/paddle_serving_server/env_check/test_fit_a_line.py b/python/paddle_serving_server/env_check/test_fit_a_line.py
index 61064ed45f21c338eb1124d126c0f4aaa64182ff..ad8c21de301424586dbec4366357f910d998ba0e 100644
--- a/python/paddle_serving_server/env_check/test_fit_a_line.py
+++ b/python/paddle_serving_server/env_check/test_fit_a_line.py
@@ -8,10 +8,7 @@ import sys
 from paddle_serving_client import Client
 from paddle_serving_client.httpclient import HttpClient
-from paddle_serving_client.io import inference_model_to_serving
-from paddle_serving_app.reader import SegPostprocess
 from paddle_serving_app.reader import *
-import paddle.inference as paddle_infer
 
 from util import *
 
@@ -31,6 +28,11 @@ class TestFitALine(object):
         self.serving_util.release()
 
     def get_truth_val_by_inference(self):
+        try:
+            import paddle.inference as paddle_infer
+        except:
+            # when paddle is not installed, directly return
+            return
         data = np.array(
             [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283,
              0.4919, 0.1856, 0.0795, -0.0332]).astype("float32")[np.newaxis, :]
@@ -55,7 +57,7 @@ class TestFitALine(object):
             output_handle = predictor.get_output_handle(output_data_name)
             output_data = output_handle.copy_to_cpu()
             output_data_dict[output_data_name] = output_data
-        # 对齐Serving output
+        # convert to the same format of Serving output
         print(output_data_dict)
         output_data_dict["price"] = output_data_dict["fc_0.tmp_1"]
         del output_data_dict["fc_0.tmp_1"]
@@ -86,7 +88,12 @@ class TestFitALine(object):
         fetch_map = client.predict(
             feed={"x": data}, fetch=fetch_list, batch=True)
         print(fetch_map)
-        return fetch_map
+        output_dict = self.serving_util.parse_http_result(fetch_map)
+        return output_dict
+
+    def test_inference(self):
+        assert self.truth_val['price'].size != 0
+
     def test_cpu(self):
         # 1.start server
@@ -97,9 +104,6 @@ class TestFitALine(object):
 
         # 2.resource check
         assert count_process_num_on_port(9494) == 1
-        # assert check_gpu_memory(0) is False
-
-        # 3.keywords check
 
         # 4.predict by brpc
         # batch_size 1
@@ -120,9 +124,6 @@ class TestFitALine(object):
 
         # 2.resource check
         assert count_process_num_on_port(9494) == 1
-        # assert check_gpu_memory(0) is False
-
-        # 3.keywords check
 
         # 4.predict by brpc
         # batch_size 1
@@ -134,8 +135,4 @@ class TestFitALine(object):
 
         # 5.release
         kill_process(9494)
 
-if __name__ == '__main__':
-    sss = TestCPPClient()
-    sss.get_truth_val_by_inference()
-
diff --git a/python/paddle_serving_server/env_check/test_uci_pipeline.py b/python/paddle_serving_server/env_check/test_uci_pipeline.py
index d39cf8825fca548dcc114e5b2a5c87fe206df27a..1d32f4e7c7ca6684cc8c73b7bd5d120abfc63c3f 100644
--- a/python/paddle_serving_server/env_check/test_uci_pipeline.py
+++ b/python/paddle_serving_server/env_check/test_uci_pipeline.py
@@ -10,7 +10,6 @@ import sys
 from paddle_serving_server.pipeline import PipelineClient
 from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize, RCNNPostprocess
 from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
-import paddle.inference as paddle_infer
 
 from util import *
@@ -30,6 +29,11 @@ class TestUCIPipeline(object):
         self.serving_util.release()
 
     def get_truth_val_by_inference(self):
+        try:
+            import paddle.inference as paddle_infer
+        except:
+            # when paddle is not installed, directly return
+            return
         data = np.array(
             [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283,
              0.4919, 0.1856, 0.0795, -0.0332]).astype("float32")[np.newaxis, :]
@@ -54,7 +58,7 @@ class TestUCIPipeline(object):
             output_handle = predictor.get_output_handle(output_data_name)
             output_data = output_handle.copy_to_cpu()
             output_data_dict[output_data_name] = output_data
-        # 对齐Serving output
+        # convert to the same format of Serving output
         output_data_dict["prob"] = output_data_dict["fc_0.tmp_1"]
         del output_data_dict["fc_0.tmp_1"]
         self.truth_val = output_data_dict
@@ -63,17 +67,15 @@ class TestUCIPipeline(object):
     def predict_pipeline_rpc(self, batch_size=1):
         # 1.prepare feed_data
         feed_dict = {'x': '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332'}
-        # TODO 原示例不支持batch
 
         # 2.init client
-        # fetch = ["label", "prob"]
         client = PipelineClient()
         client.connect(['127.0.0.1:9998'])
 
         # 3.predict for fetch_map
         ret = client.predict(feed_dict=feed_dict)
         print(ret)
-        # 转换为dict
+        # 4.convert dict to numpy
         result = {"prob": np.array(eval(ret.value[0]))}
         print(result)
         return result
@@ -83,7 +85,6 @@ class TestUCIPipeline(object):
         data = '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, ' \
                '-0.0332'
         feed_dict = {"key": [], "value": []}
-        # TODO 原示例不支持batch
         feed_dict["key"].append("x")
         feed_dict["value"].append(data)
@@ -91,7 +92,7 @@
         url = "http://127.0.0.1:18082/uci/prediction"
         r = requests.post(url=url, data=json.dumps(feed_dict))
         print(r.json())
-        # 转换为dict of numpy array
+        # 3.convert dict to numpy array
         result = {"prob": np.array(eval(r.json()["value"][0]))}
         return result
@@ -104,21 +105,19 @@ class TestUCIPipeline(object):
 
         # 2.resource check
         assert count_process_num_on_port(9998) == 1  # gRPC Server
-        assert count_process_num_on_port(18082) == 1  # gRPC gateway 代理、转发
-        #assert check_gpu_memory(0) is False
+        assert count_process_num_on_port(18082) == 1  # gRPC gateway
 
         # 3.keywords check
         check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")
 
         # 4.predict by rpc
-        # batch_size=1
         result = self.predict_pipeline_rpc(batch_size=1)
         self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
-        # # predict by http
-        result = self.predict_pipeline_http(batch_size=1)  # batch_size=1
+        # 5.predict by http
+        result = self.predict_pipeline_http(batch_size=1)
         self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
 
-        # 5.release
+        # 6.release
         kill_process(9998)
         kill_process(18082)
@@ -132,15 +131,13 @@ class TestUCIPipeline(object):
 
         # 2.resource check
         assert count_process_num_on_port(9998) == 1  # gRPC Server
-        assert count_process_num_on_port(18082) == 1  # gRPC gateway 代理、转发
-        #assert check_gpu_memory(0) is False
+        assert count_process_num_on_port(18082) == 1  # gRPC gateway
 
-        # 4.predict by rpc
-        # batch_size=1
+        # 3.predict by rpc
         result = self.predict_pipeline_rpc(batch_size=1)
         self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
-        # # predict by http
-        result = self.predict_pipeline_http(batch_size=1)  # batch_size=1
+        # 4.predict by http
+        result = self.predict_pipeline_http(batch_size=1)
         self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
 
         # 5.release
diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py
index a7a8c1c5a89fc25d36c90db08a42a9f7ba0450e2..ef758c52583753f650e6b18f01c64214fa54aa15 100644
--- a/python/paddle_serving_server/env_check/util.py
+++ b/python/paddle_serving_server/env_check/util.py
@@ -5,7 +5,6 @@ import base64
 import subprocess
 import numpy as np
 
-
 class ServingTest(object):
     def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir: str):
         """
@@ -56,8 +55,6 @@ class ServingTest(object):
             predict_result[key] = value.flatten()
         for key, value in truth_data.items():
             truth_result[key] = np.repeat(value, repeats=batch_size, axis=0).flatten()
-        # print("预测值:", predict_result)
-        # print("真实值:", truth_result)
 
         # compare
         for key in predict_result.keys():
@@ -65,10 +62,6 @@ class ServingTest(object):
             diff_count = np.sum(diff_array > delta)
             assert diff_count == 0, f"total: {np.size(diff_array)} diff count:{diff_count} max:{np.max(diff_array)}"
 
-        # for key in predict_result.keys():
-        #     for i, data in enumerate(predict_result[key]):
-        #         diff = sig_fig_compare(data, truth_result[key][i])
-        #         assert diff < delta, f"data:{data} truth:{truth_result[key][i]} diff is {diff} > {delta}, index:{i}"
 
     @staticmethod
     def parse_http_result(output):
@@ -85,7 +78,6 @@ class ServingTest(object):
 
     @staticmethod
     def release(keywords="web_service.py"):
-        #os.system("kill -9 $(ps -ef | grep serving | awk '{print $2}') > /dev/null 2>&1")
         os.system("kill -9 $(ps -ef | grep " + keywords + " | awk '{print $2}') > /dev/null 2>&1")
 
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index b2f3dbb3f660f6e433c5db3c329820374840b9fe..6e8cb2832969f05df7e8fbe65c609be15a2d74bd 100755
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -34,7 +34,7 @@ import socket
 from paddle_serving_server.env import CONF_HOME
 import signal
 from paddle_serving_server.util import *
-from paddle_serving_server.env_check.run import *
+from paddle_serving_server.env_check.run import check_env
 
 
 # web_service.py is still used by Pipeline.
diff --git a/python/requirements.txt b/python/requirements.txt
index c5f3a9248ced0888adfe6b0b5ea82f4a1a07b12f..ddb2e1360527f0b8adb88ff3d347df31ec47af28 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -21,4 +21,3 @@ sentencepiece; platform_machine == "aarch64"
 opencv-python==4.2.0.32; platform_machine != "aarch64"
 opencv-python; platform_machine == "aarch64"
 pytest
-pynvml
diff --git a/python/setup.py.server.in b/python/setup.py.server.in
index 33d787682ccea77968f2006100b75046e0e92920..4721303e7df86bd293e213ba1fd6da7866f10af0 100644
--- a/python/setup.py.server.in
+++ b/python/setup.py.server.in
@@ -34,7 +34,7 @@ util.gen_pipeline_code("paddle_serving_server")
 REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'protobuf >= 3.11.0', 'grpcio <= 1.33.2', 'grpcio-tools <= 1.33.2',
     'flask >= 1.1.1,<2.0.0', 'click==7.1.2', 'itsdangerous==1.1.0', 'Jinja2==2.11.3',
-    'MarkupSafe==1.1.1', 'Werkzeug==1.0.1', 'func_timeout', 'pyyaml'
+    'MarkupSafe==1.1.1', 'Werkzeug==1.0.1', 'func_timeout', 'pyyaml', 'pytest'
 ]
 
 packages=['paddle_serving_server',
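
A minimal sketch (not part of the patch) of invoking the new check_env entry point directly, assuming paddle_serving_server is installed together with the env_check files added above:

    # Hypothetical driver snippet; check_env() is defined in
    # python/paddle_serving_server/env_check/run.py and is the symbol
    # that serve.py now imports explicitly.
    from paddle_serving_server.env_check.run import check_env

    # Unsets http(s)_proxy if set, then runs the PaddlePaddle inference,
    # C++ serving, and Pipeline pytest cases and prints a hint for each
    # environment that fails.
    check_env()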