From b834ae9d7c0b1c9dfda8594d1061afb037fe6300 Mon Sep 17 00:00:00 2001
From: ShiningZhang
Date: Fri, 15 Oct 2021 15:18:15 +0800
Subject: [PATCH] fix: pass Loader=yaml.FullLoader to yaml.load to adapt to PyYAML 6.0

---
 python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py | 4 ++--
 .../pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py | 4 ++--
 .../pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py | 4 ++--
 .../pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py | 4 ++--
 .../examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py | 4 ++--
 .../pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py | 4 ++--
 .../pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py | 4 ++--
 .../pipeline/PaddleDetection/faster_rcnn/benchmark.py | 4 ++--
 .../pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py | 4 ++--
 python/examples/pipeline/PaddleDetection/yolov3/benchmark.py | 4 ++--
 python/examples/pipeline/bert/benchmark.py | 4 ++--
 python/examples/pipeline/ocr/benchmark.py | 4 ++--
 python/examples/pipeline/simple_web_service/benchmark.py | 2 +-
 python/paddle_serving_server/parse_profile.py | 2 +-
 python/pipeline/analyse.py | 2 +-
 python/pipeline/pipeline_server.py | 2 +-
 python/requirements.txt | 2 +-
 python/requirements_mac.txt | 2 +-
 23 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
index 71b52194..3e5db19b 100644
--- a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
index 562d159d..4b0336f9 100644
--- a/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
index 90a3ff9b..c80da12c 100644
--- a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py b/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
index 8a25952c..f8d5f2b4 100644
--- a/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py b/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
index 45853c06..611712b6 100644
--- a/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py b/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
index 62732613..cb73d2f9 100644
--- a/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/bert/benchmark.py b/python/examples/pipeline/bert/benchmark.py
index 5abc646b..ccdbbdf5 100644
--- a/python/examples/pipeline/bert/benchmark.py
+++ b/python/examples/pipeline/bert/benchmark.py
@@ -54,7 +54,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -67,7 +67,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/ocr/benchmark.py b/python/examples/pipeline/ocr/benchmark.py
index 1e391764..3c1243a1 100644
--- a/python/examples/pipeline/ocr/benchmark.py
+++ b/python/examples/pipeline/ocr/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):
 
 def gen_yml(device):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/simple_web_service/benchmark.py b/python/examples/pipeline/simple_web_service/benchmark.py
index c2c612dd..88c3ea21 100644
--- a/python/examples/pipeline/simple_web_service/benchmark.py
+++ b/python/examples/pipeline/simple_web_service/benchmark.py
@@ -27,7 +27,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency
 
 def gen_yml():
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 5}
     with open("config2.yml", "w") as fout:
diff --git a/python/paddle_serving_server/parse_profile.py b/python/paddle_serving_server/parse_profile.py
index 37e801c2..e718e468 100644
--- a/python/paddle_serving_server/parse_profile.py
+++ b/python/paddle_serving_server/parse_profile.py
@@ -96,7 +96,7 @@ if __name__ == "__main__":
     args = parse_args()
     benchmark_cfg_filename = args.benchmark_cfg
     f = open(benchmark_cfg_filename, 'r')
-    benchmark_config = yaml.load(f)
+    benchmark_config = yaml.load(f, yaml.FullLoader)
     f.close()
     benchmark_log_filename = args.benchmark_log
     f = open(benchmark_log_filename, 'r')
diff --git a/python/pipeline/analyse.py b/python/pipeline/analyse.py
index 814b43ac..a571ccfe 100644
--- a/python/pipeline/analyse.py
+++ b/python/pipeline/analyse.py
@@ -274,7 +274,7 @@ class OpAnalyst(object):
         """
         import yaml
         with open(op_config_yaml) as f:
-            op_config = yaml.load(f)
+            op_config = yaml.load(f, yaml.FullLoader)
 
         # check that each model is deployed on a different card
         card_set = set()
diff --git a/python/pipeline/pipeline_server.py b/python/pipeline/pipeline_server.py
index c3a90469..5d3fa354 100644
--- a/python/pipeline/pipeline_server.py
+++ b/python/pipeline/pipeline_server.py
@@ -341,7 +341,7 @@ class ServerYamlConfChecker(object):
                              " or yml_dict can be selected as the parameter.")
         if yml_file is not None:
             with io.open(yml_file, encoding='utf-8') as f:
-                conf = yaml.load(f.read())
+                conf = yaml.load(f.read(), yaml.FullLoader)
         elif yml_dict is not None:
             conf = yml_dict
         else:
diff --git a/python/requirements.txt b/python/requirements.txt
index 3dd93093..ba7cf42d 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -7,7 +7,7 @@ protobuf>=3.12.2
 grpcio-tools>=1.28.1
 grpcio>=1.28.1
 func-timeout>=4.3.5
-pyyaml>=1.3.0, <6.0
+pyyaml>=5.1
 flask>=1.1.2
 click==7.1.2
 itsdangerous==1.1.0
diff --git a/python/requirements_mac.txt b/python/requirements_mac.txt
index fb289aea..6a396239 100644
--- a/python/requirements_mac.txt
+++ b/python/requirements_mac.txt
@@ -6,7 +6,7 @@ google>=2.0.3
 opencv-python==4.2.0.32
 protobuf>=3.12.2
 func-timeout>=4.3.5
-pyyaml>=1.3.0, <6.0
+pyyaml>=5.1
 flask>=1.1.2
 click==7.1.2
 itsdangerous==1.1.0
--
GitLab
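
For reference, a minimal sketch of the PyYAML behavior this patch adapts to, assuming PyYAML >= 5.1 is installed (yaml.FullLoader was added in 5.1, which is why the requirements pin changes to pyyaml>=5.1). The file name example.yml below is only an illustrative placeholder, not a file from this repository:

    import yaml

    with open("example.yml") as f:   # "example.yml" is a placeholder, not part of the patch
        # PyYAML 6.0 removed the implicit default loader, so yaml.load(f) with no
        # second argument raises:
        #   TypeError: load() missing 1 required positional argument: 'Loader'
        # Passing yaml.FullLoader positionally, as this patch does, or as
        # Loader=yaml.FullLoader, works on PyYAML 5.1 and later.
        cfg = yaml.load(f, yaml.FullLoader)

    # yaml.safe_load(f) is an alternative that uses SafeLoader and accepts only
    # standard YAML tags.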