Commit ccebc398 authored by felixhjh

Modify log printing and help descriptions

Parent e3c688de
@@ -50,15 +50,18 @@ def run_test_cases(cases_list, case_type, is_open_std):
             print("{} {} environment running failure. Please refer to https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html to configure environment".format(case_type, case_name))
             os._exit(0)
         else:
-            print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/HEAD/doc/Compile_CN.md to configure environment".format(case_type, case_name))
+            print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/develop/doc/Install_CN.md".format(case_type, case_name))

-def unset_proxy(key):
+def unset_env(key):
     os.unsetenv(key)

 def check_env(mode):
     if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
-        unset_proxy("https_proxy")
-        unset_proxy("http_proxy")
+        unset_env("https_proxy")
+        unset_env("http_proxy")
+    if 'GREP_OPTIONS' in os.environ:
+        unset_env("GREP_OPTIONS")
     is_open_std = False
     if mode == "debug":
         is_open_std = True
......
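A note on the renamed helper: `os.unsetenv(key)` removes the variable from the process environment but does not update the `os.environ` mapping, so a later `'https_proxy' in os.environ` check can still succeed. Below is a minimal sketch of an alternative that keeps both in sync; the helper name `clear_env_keys` is illustrative, not part of this commit.

```python
import os

def clear_env_keys(*keys):
    # Deleting through os.environ calls unsetenv() under the hood and
    # also drops the key from the Python-side mapping, so repeated
    # membership checks stay consistent.
    for key in keys:
        os.environ.pop(key, None)

# Usage mirroring check_env(): strip proxies and GREP_OPTIONS
# before running the environment test cases.
clear_env_keys("https_proxy", "http_proxy", "GREP_OPTIONS")
```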
@@ -21,10 +21,11 @@ class TestFitALine(object):
         self.get_truth_val_by_inference(self)
         self.serving_util = serving_util
         self.serving_util.release('service')
+        kill_process(9494)

     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
-                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
+                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"])
         kill_process(9494)
         self.serving_util.release('service')
@@ -59,11 +60,9 @@ class TestFitALine(object):
             output_data = output_handle.copy_to_cpu()
             output_data_dict[output_data_name] = output_data
         # convert to the same format of Serving output
-        print(output_data_dict)
         output_data_dict["price"] = output_data_dict["fc_0.tmp_1"]
         del output_data_dict["fc_0.tmp_1"]
         self.truth_val = output_data_dict
-        print(self.truth_val, self.truth_val["price"].shape)

     def predict_brpc(self, batch_size=1):
         data = np.array(
@@ -75,7 +74,6 @@ class TestFitALine(object):
         fetch_list = client.get_fetch_names()
         fetch_map = client.predict(
             feed={"x": data}, fetch=fetch_list, batch=True)
-        print(fetch_map)
         return fetch_map

     def predict_http(self, batch_size=1):
@@ -88,12 +86,11 @@ class TestFitALine(object):
         fetch_list = client.get_fetch_names()
         fetch_map = client.predict(
             feed={"x": data}, fetch=fetch_list, batch=True)
-        print(fetch_map)
         output_dict = self.serving_util.parse_http_result(fetch_map)
         return output_dict

     def test_inference(self):
-        assert self.truth_val['price'].size != 0
+        assert self.truth_val['price'].size != 0, "The result of inference is empty"

     def test_cpu(self):
@@ -104,7 +101,7 @@ class TestFitALine(object):
         )
         # 2.resource check
-        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"
+        assert count_process_num_on_port(9494) == 1, "Error occurred when starting Paddle Server"
         # 4.predict by brpc
         # batch_size 1
@@ -124,7 +121,7 @@ class TestFitALine(object):
         )
         # 2.resource check
-        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"
+        assert count_process_num_on_port(9494) == 1, "Error occurred when starting Paddle Server"
         # 4.predict by brpc
         # batch_size 1
......
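Both `test_cpu` and `test_gpu` assert `count_process_num_on_port(9494) == 1` immediately after launching the server, which can race a slow startup. A hedged sketch of a polling variant follows; the name `wait_for_port` and its timeout values are assumptions, not part of this repo.

```python
import socket
import time

def wait_for_port(port, timeout=30.0, interval=0.5):
    # Poll until something accepts connections on localhost:port,
    # returning True on success and False if the timeout elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(("127.0.0.1", port)) == 0:
                return True
        time.sleep(interval)
    return False

# e.g. assert wait_for_port(9494), "Error occurred when starting Paddle Server"
```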
@@ -25,8 +25,9 @@ class TestUCIPipeline(object):
     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
-                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
+                   "PipelineServingLogs/pipeline.log"], iden="after predict")
         kill_process(9998)
+        kill_process(18082)
         self.serving_util.release('web_service')

     def get_truth_val_by_inference(self):
@@ -63,7 +64,6 @@ class TestUCIPipeline(object):
         output_data_dict["prob"] = output_data_dict["fc_0.tmp_1"]
         del output_data_dict["fc_0.tmp_1"]
         self.truth_val = output_data_dict
-        print(self.truth_val, self.truth_val["prob"].shape)

     def predict_pipeline_rpc(self, batch_size=1):
         # 1.prepare feed_data
@@ -75,10 +75,8 @@ class TestUCIPipeline(object):
         # 3.predict for fetch_map
         ret = client.predict(feed_dict=feed_dict)
-        print(ret)
         # 4.convert dict to numpy
         result = {"prob": np.array(eval(ret.value[0]))}
-        print(result)
         return result

     def predict_pipeline_http(self, batch_size=1):
@@ -92,7 +90,6 @@ class TestUCIPipeline(object):
         # 2.predict for fetch_map
         url = "http://127.0.0.1:18082/uci/prediction"
         r = requests.post(url=url, data=json.dumps(feed_dict))
-        print(r.json())
         # 3.convert dict to numpy array
         result = {"prob": np.array(eval(r.json()["value"][0]))}
         return result
@@ -105,8 +102,8 @@ class TestUCIPipeline(object):
         )
         # 2.resource check
-        assert count_process_num_on_port(9998) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"  # gRPC Server
-        assert count_process_num_on_port(18082) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"  # gRPC gateway
+        assert count_process_num_on_port(9998) == 1, "Error occurred when starting Paddle Server"  # gRPC Server
+        assert count_process_num_on_port(18082) == 1, "Error occurred when starting Paddle Server"  # gRPC gateway
         # 3.keywords check
         check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")
@@ -131,8 +128,8 @@ class TestUCIPipeline(object):
         )
         # 2.resource check
-        assert count_process_num_on_port(9998) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"  # gRPC Server
-        assert count_process_num_on_port(18082) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"  # gRPC gateway
+        assert count_process_num_on_port(9998) == 1, "Error occurred when starting Paddle Server"  # gRPC Server
+        assert count_process_num_on_port(18082) == 1, "Error occurred when starting Paddle Server"  # gRPC gateway
         # 3.predict by rpc
         result = self.predict_pipeline_rpc(batch_size=1)
......
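Both pipeline predict paths rebuild arrays by calling `eval()` on the server's string output. Below is a sketch of the same conversion with `ast.literal_eval`, which only accepts Python literals and is the usual safer substitute; this is a replacement suggestion, not what the commit ships.

```python
import ast
import numpy as np

def parse_pipeline_value(value_str):
    # literal_eval parses "[1.0, 2.0, ...]"-style strings without
    # executing arbitrary code, unlike eval().
    return np.array(ast.literal_eval(value_str))

# e.g. result = {"prob": parse_pipeline_value(ret.value[0])}
```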
@@ -23,8 +23,8 @@ class ServingTest(object):
         self.client_config = f"{client_dir}/serving_client_conf.prototxt"
         os.chdir(self.example_path)
-        print("======================cur path======================")
-        print(os.getcwd())
+        #print("======================cur path======================")
+        #print(os.getcwd())
         self.check_model_data_exist()

     def check_model_data_exist(self):
@@ -46,7 +46,7 @@ class ServingTest(object):
         if wait:
             p.wait()
-            print_log([err, out])
+            #print_log([err, out])

     @staticmethod
     def check_result(result_data: dict, truth_data: dict, batch_size=1, delta=1e-3):
@@ -92,7 +92,7 @@ def kill_process(port, sleep_time=0):
 def count_process_num_on_port(port):
     command = "netstat -nlp | grep :" + str(port) + " | wc -l"
     count = eval(os.popen(command).read())
-    print(f"port-{port} processes num:", count)
+    #print(f"port-{port} processes num:", count)
     return count
@@ -141,7 +141,7 @@ def print_log(file_list, iden=""):
             os.remove(file)
         else:
             print(f"{file} not exist")
-        print("======================================================")
+        #print("======================================================")

 def parse_prototxt(file):
......
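`count_process_num_on_port` pipes netstat through `wc -l` and then `eval()`s the output; since `wc -l` prints a plain integer, `int()` is sufficient. A minimal sketch under that assumption, using `subprocess` instead of `os.popen`:

```python
import subprocess

def count_process_num_on_port(port):
    # `wc -l` emits a single decimal number, so int() replaces eval().
    command = f"netstat -nlp | grep :{port} | wc -l"
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    return int(proc.stdout.strip() or 0)
```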
@@ -479,13 +479,13 @@ class Check_Env_Shell(cmd.Cmd):
     intro = "Welcome to the check env shell. Type help to list commands.\n"

     # ----- basic commands -----
     def do_help(self, arg):
-        print("\nCommand list\n"\
-              "check_all\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\
-              "check_pipeline\tCheck Environment of Pipeline Serving\n"\
-              "check_cpp\tCheck Environment of C++ Serving\n"\
-              "check_inference\tCheck Environment of Paddle Inference\n"\
-              "debug\tOpen pytest log to debug\n"\
-              "exit\tExit Check Env Shell\n")
+        print("\nCommand list\t\tDescription\n"\
+              "check_all\t\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\
+              "check_pipeline\t\tCheck Environment of Pipeline Serving\n"\
+              "check_cpp\t\tCheck Environment of C++ Serving\n"\
+              "check_inference\t\tCheck Environment of Paddle Inference\n"\
+              "debug\t\t\tWhen a check fails, open the pytest log to debug\n"\
+              "exit\t\t\tExit Check Env Shell\n")

     def do_check_all(self, arg):
         "Check Environment of Paddle Inference, Pipeline Serving, C++ Serving"
......
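The new help text aligns the description column by hand-tuning runs of `\t`, which drifts as command names change. Below is a sketch that left-justifies the command column instead; it is a formatting alternative, not the shipped implementation.

```python
def print_help():
    # Hypothetical replacement body for Check_Env_Shell.do_help: a fixed
    # field width keeps the description column aligned without
    # hand-counting tab stops.
    rows = [
        ("Command list", "Description"),
        ("check_all", "Check Environment of Paddle Inference, Pipeline Serving, C++ Serving"),
        ("check_pipeline", "Check Environment of Pipeline Serving"),
        ("check_cpp", "Check Environment of C++ Serving"),
        ("check_inference", "Check Environment of Paddle Inference"),
        ("debug", "When a check fails, open the pytest log to debug"),
        ("exit", "Exit Check Env Shell"),
    ]
    print("\n" + "\n".join(f"{name:<20}{desc}" for name, desc in rows))
```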