diff --git a/core/general-server/op/general_detection_op.cpp b/core/general-server/op/general_detection_op.cpp
index 46f5ddf1b508681661b69c60a25b6d7d000e6d4e..b62a2d2544e12d493033cf1bb8e6606d72f614d3 100644
--- a/core/general-server/op/general_detection_op.cpp
+++ b/core/general-server/op/general_detection_op.cpp
@@ -191,42 +191,64 @@ int GeneralDetectionOp::inference() {
   boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg);

-  for (int i = boxes.size() - 1; i >= 0; i--) {
-    crop_img = GetRotateCropImage(img, boxes[i]);
-
-    float wh_ratio = float(crop_img.cols) / float(crop_img.rows);
+  float max_wh_ratio = 0.0f;
+  std::vector<cv::Mat> crop_imgs;
+  std::vector<cv::Mat> resize_imgs;
+  int max_resize_w = 0;
+  int max_resize_h = 0;
+  int box_num = boxes.size();
+  std::vector<std::vector<float>> output_rec;
+  for (int i = 0; i < box_num; ++i) {
+    cv::Mat line_img = GetRotateCropImage(img, boxes[i]);
+    float wh_ratio = float(line_img.cols) / float(line_img.rows);
+    max_wh_ratio = max_wh_ratio > wh_ratio ? max_wh_ratio : wh_ratio;
+    crop_imgs.push_back(line_img);
+  }
+  for (int i = 0; i < box_num; ++i) {
+    cv::Mat resize_img;
+    crop_img = crop_imgs[i];

     this->resize_op_rec.Run(
-        crop_img, resize_img_rec, wh_ratio, this->use_tensorrt_);
+        crop_img, resize_img, max_wh_ratio, this->use_tensorrt_);

     this->normalize_op_.Run(
-        &resize_img_rec, this->mean_rec, this->scale_rec, this->is_scale_);
-
-    std::vector<float> output_rec(
-        1 * 3 * resize_img_rec.rows * resize_img_rec.cols, 0.0f);
-
-    this->permute_op_.Run(&resize_img_rec, output_rec.data());
-
-    // Inference.
-    output_shape = {1, 3, resize_img_rec.rows, resize_img_rec.cols};
-    out_num = std::accumulate(
-        output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
-    databuf_size_out = out_num * sizeof(float);
-    databuf_data_out = MempoolWrapper::instance().malloc(databuf_size_out);
-    if (!databuf_data_out) {
-      LOG(ERROR) << "Malloc failed, size: " << databuf_size_out;
-      return -1;
-    }
-    memcpy(databuf_data_out, output_rec.data(), databuf_size_out);
-    databuf_char_out = reinterpret_cast<char*>(databuf_data_out);
-    paddle::PaddleBuf paddleBuf(databuf_char_out, databuf_size_out);
-    paddle::PaddleTensor tensor_out;
-    tensor_out.name = "image";
-    tensor_out.dtype = paddle::PaddleDType::FLOAT32;
-    tensor_out.shape = {1, 3, resize_img_rec.rows, resize_img_rec.cols};
-    tensor_out.data = paddleBuf;
-    out->push_back(tensor_out);
+        &resize_img, this->mean_rec, this->scale_rec, this->is_scale_);
+
+    max_resize_w = std::max(max_resize_w, resize_img.cols);
+    max_resize_h = std::max(max_resize_h, resize_img.rows);
+    resize_imgs.push_back(resize_img);
+  }
+  int buf_size = 3 * max_resize_h * max_resize_w;
+  output_rec = std::vector<std::vector<float>>(box_num,
+                                               std::vector<float>(buf_size, 0.0f));
+  for (int i = 0; i < box_num; ++i) {
+    resize_img_rec = resize_imgs[i];
+
+    this->permute_op_.Run(&resize_img_rec, output_rec[i].data());
+  }
+
+  // Inference.
+  output_shape = {box_num, 3, max_resize_h, max_resize_w};
+  out_num = std::accumulate(
+      output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
+  databuf_size_out = out_num * sizeof(float);
+  databuf_data_out = MempoolWrapper::instance().malloc(databuf_size_out);
+  if (!databuf_data_out) {
+    LOG(ERROR) << "Malloc failed, size: " << databuf_size_out;
+    return -1;
+  }
+  int offset = buf_size * sizeof(float);
+  for (int i = 0; i < box_num; ++i) {
+    memcpy(databuf_data_out + i * offset, output_rec[i].data(), offset);
   }
+  databuf_char_out = reinterpret_cast<char*>(databuf_data_out);
+  paddle::PaddleBuf paddleBuf(databuf_char_out, databuf_size_out);
+  paddle::PaddleTensor tensor_out;
+  tensor_out.name = "image";
+  tensor_out.dtype = paddle::PaddleDType::FLOAT32;
+  tensor_out.shape = output_shape;
+  tensor_out.data = paddleBuf;
+  out->push_back(tensor_out);
 }

 out->erase(out->begin(), out->begin() + infer_outnum);
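The hunk above batches OCR recognition: instead of running the recognition model once per detected box, every crop is packed into a single `[box_num, 3, max_resize_h, max_resize_w]` tensor, with each crop's buffer zero-padded to the size of the largest resized crop. A minimal NumPy sketch of that pack-and-pad step (names here are illustrative, not the op's actual identifiers):

```python
import numpy as np

def batch_with_padding(crops):
    """Pack variable-sized CHW float arrays into one zero-padded batch.

    Mirrors the idea in the rewritten GeneralDetectionOp::inference():
    each crop is copied into a [3, max_h, max_w] zero buffer so the
    recognition model can run once over the whole batch.
    """
    max_h = max(c.shape[1] for c in crops)
    max_w = max(c.shape[2] for c in crops)
    batch = np.zeros((len(crops), 3, max_h, max_w), dtype=np.float32)
    for i, c in enumerate(crops):
        batch[i, :, :c.shape[1], :c.shape[2]] = c
    return batch

# e.g. two text-line crops of different widths
crops = [np.ones((3, 32, 100), np.float32), np.ones((3, 32, 320), np.float32)]
print(batch_with_padding(crops).shape)  # (2, 3, 32, 320)
```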
diff --git a/core/general-server/op/general_detection_op.h b/core/general-server/op/general_detection_op.h
index 272ed5ff40575d42ac3058ad1824285925fc252c..2cc027f5ed761f2d040c0c1858e81cb70a93fcb0 100644
--- a/core/general-server/op/general_detection_op.h
+++ b/core/general-server/op/general_detection_op.h
@@ -63,7 +63,7 @@ class GeneralDetectionOp

   double det_db_thresh_ = 0.3;
   double det_db_box_thresh_ = 0.5;
-  double det_db_unclip_ratio_ = 2.0;
+  double det_db_unclip_ratio_ = 1.5;

   std::vector<float> mean_det = {0.485f, 0.456f, 0.406f};
   std::vector<float> scale_det = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
diff --git a/core/general-server/op/general_dist_kv_infer_op.cpp b/core/general-server/op/general_dist_kv_infer_op.cpp
index 870f045d43ccf38a73d53e048e1ee435950f8c36..238d4cac3a085ef188f427c8cc3669b7617443d7 100644
--- a/core/general-server/op/general_dist_kv_infer_op.cpp
+++ b/core/general-server/op/general_dist_kv_infer_op.cpp
@@ -186,9 +186,9 @@ int GeneralDistKVInferOp::inference() {
   if (values.size() != keys.size() || values[0].buff.size() == 0) {
     LOG(ERROR) << "cube value return null";
   }
-  // size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
+  size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
   // size_t EMBEDDING_SIZE = (values[0].buff.size() - 10) / sizeof(float);
-  size_t EMBEDDING_SIZE = 9;
+  // size_t EMBEDDING_SIZE = 9;
   TensorVector sparse_out;
   sparse_out.resize(sparse_count);
   TensorVector dense_out;
@@ -241,7 +241,7 @@ int GeneralDistKVInferOp::inference() {

     // The data generated by pslib has 10 bytes of information to be filtered
     // out
-    memcpy(data_ptr, cur_val->buff.data() + 10, cur_val->buff.size() - 10);
+    memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size());
     // VLOG(3) << keys[cube_val_idx] << ":" << data_ptr[0] << ", " <<
     // data_ptr[1] << ", " << data_ptr[2];
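The `general_dist_kv_infer_op.cpp` change above derives the embedding width from the bytes cube actually returns, instead of hard-coding `EMBEDDING_SIZE = 9` and skipping a 10-byte pslib header. A sketch of the same computation, assuming each cube value is a raw little-endian float32 vector:

```python
import struct

def embedding_from_cube_value(buff: bytes):
    """Interpret a cube lookup value as a float32 embedding.

    EMBEDDING_SIZE is simply the byte length divided by sizeof(float),
    which is what the patched C++ now computes.
    """
    assert len(buff) % 4 == 0, "value buffer is not a whole number of floats"
    n = len(buff) // 4
    return list(struct.unpack("<%df" % n, buff))

print(embedding_from_cube_value(struct.pack("<9f", *range(9))))  # 9-dim vector
```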
diff --git a/core/predictor/framework/bsf-inl.h b/core/predictor/framework/bsf-inl.h
--- a/core/predictor/framework/bsf-inl.h
+++ b/core/predictor/framework/bsf-inl.h
@@ ... @@ TaskHandler<InItemT> TaskExecutor<TaskT>::schedule(
     LOG(ERROR) << "Failed get TaskT from object pool";
     return TaskHandler<TaskT>::valid_handle();
   }
+  task->clear();

   /*
   if (!BatchTasks<TaskT>::check_valid(in, out, _overrun)) {
diff --git a/core/predictor/framework/bsf.h b/core/predictor/framework/bsf.h
index aa3aab5603012ce5d2149774d63f4c7d14655adf..17f0c3d2ace16d50c223692b91f5dd30b3764cd0 100755
--- a/core/predictor/framework/bsf.h
+++ b/core/predictor/framework/bsf.h
@@ -99,7 +99,40 @@ struct Task {
     outLodTensorVector.clear();
   }
   ~Task() {
+    read_fd = -1;
+    write_fd = -1;
+    owner_tid = -1;
+    inVectorT_ptr = NULL;
+    outVectorT_ptr = NULL;
+    set_feed_lod_index.clear();
+    set_feed_nobatch_index.clear();
+    vector_fetch_lod_index.clear();
+    set_fetch_nobatch_index.clear();
+    rem = -1;
+    total_feed_batch = 0;
+    taskmeta_num = 0;
+    index.store(0, butil::memory_order_relaxed);
     THREAD_MUTEX_DESTROY(&task_mut);
+    fetch_init = false;
+    outLodTensorVector.clear();
+  }
+
+  void clear() {
+    read_fd = -1;
+    write_fd = -1;
+    owner_tid = -1;
+    inVectorT_ptr = NULL;
+    outVectorT_ptr = NULL;
+    set_feed_lod_index.clear();
+    set_feed_nobatch_index.clear();
+    vector_fetch_lod_index.clear();
+    set_fetch_nobatch_index.clear();
+    rem = -1;
+    total_feed_batch = 0;
+    taskmeta_num = 0;
+    index.store(0, butil::memory_order_relaxed);
+    THREAD_MUTEX_INIT(&task_mut, NULL);
+    fetch_init = false;
     outLodTensorVector.clear();
   }

@@ -323,7 +356,7 @@ struct Task {
       size_t feedvar_index = vector_fetch_lod_index[index];
       // 由于PaddleTensor的resize实现，是每次都会清空，所以必须先统计总长度。
       for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
-           ++taskmeta_num) {
+           ++taskmeta_index) {
         data_length +=
             outLodTensorVector[taskmeta_index][index].data.length();
         lod_length += outLodTensorVector[taskmeta_index][index].lod[0].size();
@@ -347,7 +380,7 @@ struct Task {
       size_t once_lod_length = 0;
       size_t last_lod_value = fetchVarTensor.lod[0][lod_length_offset];
       for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
-           ++taskmeta_num) {
+           ++taskmeta_index) {
         void* dst_ptr = fetchVarTensor.data.data() + data_length_offset;
         void* source_ptr =
             outLodTensorVector[taskmeta_index][index].data.data();
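The `task->clear()` call and the `Task::clear()` method above exist because tasks come from an object pool: `butil::get_object<TaskT>()` may hand back a previously used `Task` that still carries stale fds, counters, and fetch buffers. A Python sketch of the reuse pattern being fixed (hypothetical names, not the bsf API):

```python
class PooledTask:
    def __init__(self):
        self.clear()

    def clear(self):
        """Reset every field a previous use could have dirtied."""
        self.read_fd = -1
        self.write_fd = -1
        self.owner_tid = -1
        self.out_tensors = []

pool = []

def get_task():
    # Reuse a pooled object if available, like butil::get_object<TaskT>().
    task = pool.pop() if pool else PooledTask()
    task.clear()  # the fix: never trust leftover state from the last request
    return task

def return_task(task):
    pool.append(task)
```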
diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h
index 45146e43c6e503b69e3534a119d31c0da9d0bcfa..a824acaff2417dcb5e885c0ae9e1acd6c17e7def 100644
--- a/core/predictor/framework/infer.h
+++ b/core/predictor/framework/infer.h
@@ -277,7 +277,7 @@ class DBReloadableInferEngine : public ReloadableInferEngine {
       LOG(WARNING) << "Loading cube cache[" << next_idx << "] ...";
       std::string model_path = conf.model_dir();
       if (access(model_path.c_str(), F_OK) == 0) {
-        std::string cube_cache_path = model_path + "/" + "cube_cache";
+        std::string cube_cache_path = model_path + "/cube_cache";
         int reload_cache_ret =
             md->caches[next_idx]->reload_data(cube_cache_path);
         LOG(WARNING) << "Loading cube cache[" << next_idx << "] done.";
       } else {
@@ -437,7 +437,7 @@ class CloneDBReloadableInferEngine
     // create caches
     std::string model_path = conf.model_dir();
     if (access(model_path.c_str(), F_OK) == 0) {
-      std::string cube_cache_path = model_path + "cube_cache";
+      std::string cube_cache_path = model_path + "/cube_cache";
       int reload_cache_ret =
           md->caches[next_idx]->reload_data(cube_cache_path);
       LOG(WARNING) << "create cube cache[" << next_idx << "] done.";
diff --git a/core/predictor/tools/ocrtools/preprocess_op.cpp b/core/predictor/tools/ocrtools/preprocess_op.cpp
index ab69e4d23abbcbebfbfb5c453fbca46ff5e51967..045984e4c004f965d52badc8b8a0b8996224ab7c 100644
--- a/core/predictor/tools/ocrtools/preprocess_op.cpp
+++ b/core/predictor/tools/ocrtools/preprocess_op.cpp
@@ -82,14 +82,14 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
   else if (resize_h / 32 < 1 + 1e-5)
     resize_h = 32;
   else
-    resize_h = (resize_h / 32) * 32;
+    resize_h = (resize_h / 32 - 1) * 32;

   if (resize_w % 32 == 0)
     resize_w = resize_w;
   else if (resize_w / 32 < 1 + 1e-5)
     resize_w = 32;
   else
-    resize_w = (resize_w / 32) * 32;
+    resize_w = (resize_w / 32 - 1) * 32;

   if (!use_tensorrt) {
     cv::resize(img, resize_img, cv::Size(resize_w, resize_h));
     ratio_h = float(resize_h) / float(h);
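The `preprocess_op.cpp` hunk above changes how a detection input dimension is snapped to the 32-pixel stride multiple the network expects: a dimension that is not already a multiple of 32 now drops to the multiple below the truncated one (100 becomes 64 rather than 96), shrinking the input further. A quick sketch of the two roundings:

```python
def snap_old(x):
    # (resize / 32) * 32 with integer division: nearest multiple of 32 below
    return (x // 32) * 32

def snap_new(x):
    # (resize / 32 - 1) * 32, as in the patch: one multiple lower still
    return (x // 32 - 1) * 32

for x in (100, 65, 33):
    print(x, snap_old(x), snap_new(x))
# 100 -> 96 vs 64; 65 -> 64 vs 32; 33 -> 32 vs 0
# (exact multiples of 32 never reach this branch: the % 32 == 0 check keeps them)
```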
diff --git a/python/examples/criteo_ctr_with_cube/cube/conf/cube.conf b/python/examples/criteo_ctr_with_cube/cube/conf/cube.conf
new file mode 100755
index 0000000000000000000000000000000000000000..b70f6e34247e410f9b80054010338d3c8f452ec6
--- /dev/null
+++ b/python/examples/criteo_ctr_with_cube/cube/conf/cube.conf
@@ -0,0 +1,13 @@
+[{
+    "dict_name": "test_dict",
+    "shard": 1,
+    "dup": 1,
+    "timeout": 200,
+    "retry": 3,
+    "backup_request": 100,
+    "type": "ipport_list",
+    "load_balancer": "rr",
+    "nodes": [{
+        "ipport_list": "list://127.0.0.1:8027"
+    }]
+}]
diff --git a/python/examples/criteo_ctr_with_cube/cube/conf/gflags.conf b/python/examples/criteo_ctr_with_cube/cube/conf/gflags.conf
new file mode 100755
index 0000000000000000000000000000000000000000..21c7bddebd8f22b91d0ba26a6121007f96a4380b
--- /dev/null
+++ b/python/examples/criteo_ctr_with_cube/cube/conf/gflags.conf
@@ -0,0 +1,4 @@
+--port=8027
+--dict_split=1
+--in_mem=true
+--log_dir=./log/
diff --git a/python/examples/criteo_ctr_with_cube/cube/keys b/python/examples/criteo_ctr_with_cube/cube/keys
new file mode 100755
index 0000000000000000000000000000000000000000..f00c965d8307308469e537302baa73048488f162
--- /dev/null
+++ b/python/examples/criteo_ctr_with_cube/cube/keys
@@ -0,0 +1,10 @@
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
diff --git a/python/examples/criteo_ctr_with_cube/test_client.py b/python/examples/criteo_ctr_with_cube/test_client.py
index bef04807e9b5d5c2cdc316828ed6f960f0eeb0f8..f12d727a3d2c4f6fce013d1f815f8b589a327dd5 100755
--- a/python/examples/criteo_ctr_with_cube/test_client.py
+++ b/python/examples/criteo_ctr_with_cube/test_client.py
@@ -16,7 +16,7 @@
 from paddle_serving_client import Client
 import sys
 import os
-import criteo as criteo
+import criteo_reader as criteo
 import time
 from paddle_serving_client.metric import auc
 import numpy as np
@@ -35,22 +35,23 @@
 reader = dataset.infer_reader(test_filelists, batch, buf_size)
 label_list = []
 prob_list = []
 start = time.time()
-for ei in range(10000):
+for ei in range(100):
     if py_version == 2:
         data = reader().next()
     else:
         data = reader().__next__()
     feed_dict = {}
-    feed_dict['dense_input'] = data[0][0]
+    feed_dict['dense_input'] = np.array(data[0][0]).reshape(1, len(data[0][0]))
+
     for i in range(1, 27):
-        feed_dict["embedding_{}.tmp_0".format(i - 1)] = np.array(data[0][i]).reshape(-1)
+        feed_dict["embedding_{}.tmp_0".format(i - 1)] = np.array(data[0][i]).reshape(len(data[0][i]))
         feed_dict["embedding_{}.tmp_0.lod".format(i - 1)] = [0, len(data[0][i])]
-    fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
+    fetch_map = client.predict(feed=feed_dict, fetch=["prob"], batch=True)
     print(fetch_map)
     prob_list.append(fetch_map['prob'][0][1])
     label_list.append(data[0][-1][0])
-print(auc(label_list, prob_list))
+
 end = time.time()
 print(end - start)
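In the criteo client above, each of the 26 sparse slots is fed as a flat int array paired with a companion `<name>.lod` offset list describing the variable-length sequence, and `batch=True` tells the client the dense array already carries its batch dimension. A trimmed sketch of that feed layout (slot values invented for illustration):

```python
import numpy as np

slot_ids = [17, 93, 93]  # one variable-length sparse slot
feed = {
    "embedding_0.tmp_0": np.array(slot_ids, dtype=np.int64).reshape(len(slot_ids)),
    # lod offsets: a single sequence covering items [0, 3)
    "embedding_0.tmp_0.lod": [0, len(slot_ids)],
    "dense_input": np.zeros((1, 13), dtype=np.float32),  # criteo's 13 dense features
}
# fetch_map = client.predict(feed=feed, fetch=["prob"], batch=True)
```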
diff --git a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0abbdab06eb5950b93908cc91adfa640e8a3ac78
Binary files /dev/null and b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg differ
diff --git a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000570688.jpg b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/000000570688.jpg and /dev/null differ
diff --git a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README.md b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README.md
index 5612b754ae9610ed351a4becfec6b47bdcb57c8d..58d13e53fe9ac3b177a3b6e6661a1370efa796b9 100644
--- a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README.md
+++ b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README.md
@@ -16,5 +16,5 @@ This model support TensorRT, if you want a faster inference, please use `--use_t
 ### Perform prediction
 ```
-python3 test_client.py 000000570688.jpg
+python3 test_client.py 000000014439.jpg
 ```
diff --git a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README_CN.md b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README_CN.md
index d9737261632c64172684fea0d60c566f242e95e6..af2fd8753cc56ef9c732c21020712674313ac4fa 100644
--- a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README_CN.md
+++ b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/README_CN.md
@@ -18,5 +18,5 @@ python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_
 ### 执行预测
 ```
-python3 test_client.py 000000570688.jpg
+python3 test_client.py 000000014439.jpg
 ```
diff --git a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/test_client.py b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/test_client.py
index f7cdc745b97c1e07cf82c8a1b9a8a25323d1b4af..4c441f5dbb8fa086a5cf15dc457a5215affc8463 100644
--- a/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/test_client.py
+++ b/python/examples/detection/fcos_dcn_r50_fpn_1x_coco/test_client.py
@@ -27,7 +27,7 @@ preprocess = Sequential([
     PadStride(128)
 ])

-postprocess = RCNNPostprocess("label_list.txt", "output")
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])

 client = Client()
 client.load_client_config("serving_client/serving_client_conf.prototxt")
@@ -41,5 +41,6 @@ fetch_map = client.predict(
     },
     fetch=["save_infer_model/scale_0.tmp_1"],
     batch=False)
+print(fetch_map)
 fetch_map["image"] = sys.argv[1]
 postprocess(fetch_map)
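Both detection clients in this patch now pass a third argument to `RCNNPostprocess`. Presumably this is the network input resolution, letting the postprocessor map boxes predicted on the padded input back to the original image; check the paddle_serving_app source for the parameter's exact name and semantics. A hedged usage sketch:

```python
from paddle_serving_app.reader import RCNNPostprocess

# Third positional argument: assumed to be the model's input shape
# (an inference from this diff, not documented behavior). Detected
# boxes are rescaled from that resolution to the original image
# before the annotated result is written under "output".
postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])
```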
diff --git a/python/examples/detection/ssd_vgg16_300_240e_voc/000000014439.jpg b/python/examples/detection/ssd_vgg16_300_240e_voc/000000014439.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0abbdab06eb5950b93908cc91adfa640e8a3ac78
Binary files /dev/null and b/python/examples/detection/ssd_vgg16_300_240e_voc/000000014439.jpg differ
diff --git a/python/examples/detection/ssd_vgg16_300_240e_voc/000000570688.jpg b/python/examples/detection/ssd_vgg16_300_240e_voc/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/python/examples/detection/ssd_vgg16_300_240e_voc/000000570688.jpg and /dev/null differ
diff --git a/python/examples/detection/ssd_vgg16_300_240e_voc/README.md b/python/examples/detection/ssd_vgg16_300_240e_voc/README.md
index 60a22fdb5d3c1486827376d935c4f39de1b2c387..8a9a766c7b24d8468cbc72d6affd90263e86b013 100644
--- a/python/examples/detection/ssd_vgg16_300_240e_voc/README.md
+++ b/python/examples/detection/ssd_vgg16_300_240e_voc/README.md
@@ -16,5 +16,5 @@ This model support TensorRT, if you want a faster inference, please use `--use_t
 ### Perform prediction
 ```
-python3 test_client.py 000000570688.jpg
+python3 test_client.py 000000014439.jpg
 ```
diff --git a/python/examples/detection/ssd_vgg16_300_240e_voc/README_CN.md b/python/examples/detection/ssd_vgg16_300_240e_voc/README_CN.md
index a2e0d187a5e896f796dec4ed0dbdcb3af4ed5334..d3df37d774bd1a478af0a41a9fca9f238ca69aac 100644
--- a/python/examples/detection/ssd_vgg16_300_240e_voc/README_CN.md
+++ b/python/examples/detection/ssd_vgg16_300_240e_voc/README_CN.md
@@ -18,5 +18,5 @@ python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_
 ### 执行预测
 ```
-python3 test_client.py 000000570688.jpg
+python3 test_client.py 000000014439.jpg
 ```
diff --git a/python/examples/detection/ssd_vgg16_300_240e_voc/test_client.py b/python/examples/detection/ssd_vgg16_300_240e_voc/test_client.py
index f7cdc745b97c1e07cf82c8a1b9a8a25323d1b4af..4c441f5dbb8fa086a5cf15dc457a5215affc8463 100644
--- a/python/examples/detection/ssd_vgg16_300_240e_voc/test_client.py
+++ b/python/examples/detection/ssd_vgg16_300_240e_voc/test_client.py
@@ -27,7 +27,7 @@ preprocess = Sequential([
     PadStride(128)
 ])

-postprocess = RCNNPostprocess("label_list.txt", "output")
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])

 client = Client()
 client.load_client_config("serving_client/serving_client_conf.prototxt")
@@ -41,5 +41,6 @@ fetch_map = client.predict(
     },
     fetch=["save_infer_model/scale_0.tmp_1"],
     batch=False)
+print(fetch_map)
 fetch_map["image"] = sys.argv[1]
 postprocess(fetch_map)
diff --git a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
index 71b5219441a536789e02e4549c84a5cd550bc70f..3e5db19b69fc8693adfe77a84297436bfb497642 100644
--- a/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/DarkNet53/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
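The `benchmark.py` hunk above is the first of more than a dozen identical fixes in this patch (hence also the `pyyaml>=5.1` requirement bump at the end of the diff): PyYAML 5.1 deprecated calling `yaml.load()` without an explicit `Loader`, because the old default could construct arbitrary Python objects from untrusted input. `FullLoader` keeps full YAML semantics while refusing arbitrary object construction; for plain config files like these, `safe_load` would work as well:

```python
import yaml

doc = "dag:\n  tracer:\n    interval_s: 10\n"

config = yaml.load(doc, yaml.FullLoader)  # explicit loader, no warning
same = yaml.safe_load(doc)                # equivalent for plain configs
assert config == same == {"dag": {"tracer": {"interval_s": 10}}}
```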
diff --git a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV2/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py b/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
index 562d159da3ce96233f7f9d2019fbb3061022dc06..4b0336f97c2c520a46d596bf5e435c2b9e3094a9 100644
--- a/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ResNet_V2_50/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
index 90a3ff9bdda545a01427a26146edcbdf8332da30..c80da12ce36618e75897b33d58e4f4febd382861 100644
--- a/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
+++ b/python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py b/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
index 8a25952cdda2e09f0f74794cf8a2226880f29040..f8d5f2b4fd196048a139867a893b06f47d2778bb 100644
--- a/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/faster_rcnn/web_service.py b/python/examples/pipeline/PaddleDetection/faster_rcnn/web_service.py
index fa026000e399cf0246df4afa2a37005d40d53d70..08a9122296b801689f3d5faf2c75113b293ea220 100644
--- a/python/examples/pipeline/PaddleDetection/faster_rcnn/web_service.py
+++ b/python/examples/pipeline/PaddleDetection/faster_rcnn/web_service.py
@@ -25,7 +25,7 @@ class FasterRCNNOp(Op):
         self.img_preprocess = Sequential([
             BGR2RGB(), Div(255.0),
             Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
-            Resize((640, 640)), Transpose((2, 0, 1))
+            Resize(640, 640), Transpose((2, 0, 1))
         ])
         self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
diff --git a/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py b/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
index 45853c065b013754d0d591686a9a03ad0aeb6a3d..611712b6754efd88fc7b51027e99b9bb3e82cf7d 100644
--- a/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py b/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
index 62732613dbfc6ab0b119609a547ea36c18b11ede..cb73d2f932c12d0559af307b3ecf12ecf7986390 100644
--- a/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
+++ b/python/examples/pipeline/PaddleDetection/yolov3/benchmark.py
@@ -33,7 +33,7 @@ def cv2_to_base64(image):

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -46,7 +46,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
diff --git a/python/examples/pipeline/bert/benchmark.py b/python/examples/pipeline/bert/benchmark.py
index 5abc646bffffff118ab24414e3a50f06668729d9..ccdbbdf599943ebf757d336b96d4f19b92e1b94a 100644
--- a/python/examples/pipeline/bert/benchmark.py
+++ b/python/examples/pipeline/bert/benchmark.py
@@ -54,7 +54,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -67,7 +67,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/ocr/benchmark.py b/python/examples/pipeline/ocr/benchmark.py
index 1e39176436b0be11093031ddfc4727ee68671c62..3c1243a1c327a5f94544c7fa56524321cad2892f 100644
--- a/python/examples/pipeline/ocr/benchmark.py
+++ b/python/examples/pipeline/ocr/benchmark.py
@@ -28,7 +28,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def parse_benchmark(filein, fileout):
     with open(filein, "r") as fin:
-        res = yaml.load(fin)
+        res = yaml.load(fin, yaml.FullLoader)
         del_list = []
         for key in res["DAG"].keys():
             if "call" in key:
@@ -41,7 +41,7 @@ def parse_benchmark(filein, fileout):

 def gen_yml(device):
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
diff --git a/python/examples/pipeline/simple_web_service/benchmark.py b/python/examples/pipeline/simple_web_service/benchmark.py
index c2c612dd2740d7c97da4289a0913270b03611e7a..88c3ea21722ad9e6420e193a69299b2cf8e443a4 100644
--- a/python/examples/pipeline/simple_web_service/benchmark.py
+++ b/python/examples/pipeline/simple_web_service/benchmark.py
@@ -27,7 +27,7 @@ from paddle_serving_client.utils import benchmark_args, show_latency

 def gen_yml():
     fin = open("config.yml", "r")
-    config = yaml.load(fin)
+    config = yaml.load(fin, yaml.FullLoader)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 5}
     with open("config2.yml", "w") as fout:
diff --git a/python/paddle_serving_server/parse_profile.py b/python/paddle_serving_server/parse_profile.py
index 37e801c255272778c6926642beabdcf2f3f92cf0..e718e4685e13ed35f8dba16eb0d5f8a3ff6fd305 100644
--- a/python/paddle_serving_server/parse_profile.py
+++ b/python/paddle_serving_server/parse_profile.py
@@ -96,7 +96,7 @@ if __name__ == "__main__":
     args = parse_args()
     benchmark_cfg_filename = args.benchmark_cfg
     f = open(benchmark_cfg_filename, 'r')
-    benchmark_config = yaml.load(f)
+    benchmark_config = yaml.load(f, yaml.FullLoader)
     f.close()
     benchmark_log_filename = args.benchmark_log
     f = open(benchmark_log_filename, 'r')
diff --git a/python/pipeline/analyse.py b/python/pipeline/analyse.py
index 814b43acaf52bbf0c066ff4bbdce2a0165508a2d..a571ccfe9018fac70523803c40d05df1cf16e271 100644
--- a/python/pipeline/analyse.py
+++ b/python/pipeline/analyse.py
@@ -274,7 +274,7 @@ class OpAnalyst(object):
         """
         import yaml
         with open(op_config_yaml) as f:
-            op_config = yaml.load(f)
+            op_config = yaml.load(f, yaml.FullLoader)

         # check that each model is deployed on a different card
         card_set = set()
diff --git a/python/pipeline/pipeline_server.py b/python/pipeline/pipeline_server.py
index c3a904690dcd4a7a044f6afd309553fd1446aa49..5d3fa3540149412186b9335741964910a7ed56d2 100644
--- a/python/pipeline/pipeline_server.py
+++ b/python/pipeline/pipeline_server.py
@@ -341,7 +341,7 @@ class ServerYamlConfChecker(object):
                 " or yml_dict can be selected as the parameter.")
         if yml_file is not None:
             with io.open(yml_file, encoding='utf-8') as f:
-                conf = yaml.load(f.read())
+                conf = yaml.load(f.read(), yaml.FullLoader)
         elif yml_dict is not None:
             conf = yml_dict
         else:
diff --git a/python/requirements.txt b/python/requirements.txt
index c28133c67f6c85e3dd12b08914c7aa0848a4cad7..ba7cf42d9e0a6b4cd713ef245108bb45e7244dda 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -7,7 +7,7 @@ protobuf>=3.12.2
 grpcio-tools>=1.28.1
 grpcio>=1.28.1
 func-timeout>=4.3.5
-pyyaml>=1.3.0
+pyyaml>=5.1
 flask>=1.1.2
 click==7.1.2
 itsdangerous==1.1.0
diff --git a/python/requirements_mac.txt b/python/requirements_mac.txt
index b14fbd5fc5b779e6f2d216df434bfeb615d59b05..6a396239c5e68e545bd5af0928b3e6f42b19c82b 100644
--- a/python/requirements_mac.txt
+++ b/python/requirements_mac.txt
@@ -6,7 +6,7 @@ google>=2.0.3
 opencv-python==4.2.0.32
 protobuf>=3.12.2
 func-timeout>=4.3.5
-pyyaml>=1.3.0
+pyyaml>=5.1
 flask>=1.1.2
 click==7.1.2
 itsdangerous==1.1.0