diff --git a/python/examples/pipeline/faster_rcnn/000000570688.jpg b/python/examples/pipeline/faster_rcnn/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/python/examples/pipeline/faster_rcnn/000000570688.jpg and /dev/null differ
diff --git a/python/examples/pipeline/faster_rcnn/benchmark.py b/python/examples/pipeline/faster_rcnn/benchmark.py
deleted file mode 100644
index 9fdb48f7e056b2e519c1f6d7d82f4336a22c97ad..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/benchmark.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import sys
-import os
-import yaml
-import requests
-import time
-import json
-import cv2
-import base64
-try:
-    from paddle_serving_server_gpu.pipeline import PipelineClient
-except ImportError:
-    from paddle_serving_server.pipeline import PipelineClient
-import numpy as np
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args, show_latency
-
-
-def cv2_to_base64(image):
-    return base64.b64encode(image).decode('utf8')
-
-
-def parse_benchmark(filein, fileout):
-    with open(filein, "r") as fin:
-        res = yaml.load(fin)
-        del_list = []
-        for key in res["DAG"].keys():
-            if "call" in key:
-                del_list.append(key)
-        for key in del_list:
-            del res["DAG"][key]
-    with open(fileout, "w") as fout:
-        yaml.dump(res, fout, default_flow_style=False)
-
-
-def gen_yml(device):
-    fin = open("config.yml", "r")
-    config = yaml.load(fin)
-    fin.close()
-    config["dag"]["tracer"] = {"interval_s": 10}
-    if device == "gpu":
-        config["op"]["bert"]["local_service_conf"]["device_type"] = 1
-        config["op"]["bert"]["local_service_conf"]["devices"] = "2"
-    with open("config2.yml", "w") as fout:
-        yaml.dump(config, fout, default_flow_style=False)
-
-
-def run_http(idx, batch_size):
-    print("start thread ({})".format(idx))
-    url = "http://127.0.0.1:18082/faster_rcnn/prediction"
-    with open(os.path.join(".", "000000570688.jpg"), 'rb') as file:
-        image_data1 = file.read()
-    image = cv2_to_base64(image_data1)
-
-    start = time.time()
-    for i in range(10):
-        data = {"key": [], "value": []}
-        for j in range(batch_size):
-            data["key"].append("image_" + str(j))
-            data["value"].append(image)
-        r = requests.post(url=url, data=json.dumps(data))
-        print("done")
-    end = time.time()
-    return [[end - start]]
-
-
-def multithread_http(thread, batch_size):
-    multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http, thread, batch_size)
-
-
-def run_rpc(thread, batch_size):
-    client = PipelineClient()
-    client.connect(['127.0.0.1:9998'])
-    with open("data-c.txt", 'r') as fin:
-        start = time.time()
-        lines = fin.readlines()
-        start_idx = 0
-        while start_idx < len(lines):
-            end_idx = min(len(lines), start_idx + batch_size)
-            feed = {}
-            for i in range(start_idx, end_idx):
-                feed[str(i - start_idx)] = lines[i]
-            ret = client.predict(feed_dict=feed, fetch=["res"])
-            start_idx += batch_size
-            if start_idx > 1000:
-                break
-        end = time.time()
-    return [[end - start]]
-
-
-def multithread_rpc(thraed, batch_size):
-    multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_rpc, thread, batch_size)
-
-
-if __name__ == "__main__":
-    if sys.argv[1] == "yaml":
-        mode = sys.argv[2]  # brpc/ local predictor
-        thread = int(sys.argv[3])
-        device = sys.argv[4]
-        gen_yml(device)
-    elif sys.argv[1] == "run":
-        mode = sys.argv[2]  # http/ rpc
-        thread = int(sys.argv[3])
-        batch_size = int(sys.argv[4])
-        if mode == "http":
-            multithread_http(thread, batch_size)
-        elif mode == "rpc":
-            multithread_rpc(thread, batch_size)
-    elif sys.argv[1] == "dump":
-        filein = sys.argv[2]
-        fileout = sys.argv[3]
-        parse_benchmark(filein, fileout)
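Note: the deleted benchmark.py fans run_http out across N threads with MultiThreadRunner; each thread times 10 batched POSTs and returns its elapsed seconds. For readers without Paddle Serving installed, the same fan-out/aggregate pattern can be sketched with the standard library alone. Everything below (build_payload, timed_worker, the stub sender) is hypothetical scaffolding for illustration, not part of the Paddle Serving API:

import base64
import json
import time
from concurrent.futures import ThreadPoolExecutor

def build_payload(image_bytes, batch_size):
    # Mirror the benchmark's request shape: parallel key/value lists,
    # one base64-encoded image per batch slot.
    b64 = base64.b64encode(image_bytes).decode("utf8")
    return json.dumps({
        "key": ["image_%d" % j for j in range(batch_size)],
        "value": [b64] * batch_size,
    })

def timed_worker(idx, payload, send, repeats=10):
    # Same contract as run_http: each worker returns [elapsed_seconds].
    print("start thread ({})".format(idx))
    start = time.time()
    for _ in range(repeats):
        send(payload)  # e.g. requests.post(url, data=payload)
    return [time.time() - start]

def run_many(thread, payload, send):
    # Stand-in for MultiThreadRunner.run: one timed worker per thread,
    # results collected into a list of per-thread latencies.
    with ThreadPoolExecutor(max_workers=thread) as pool:
        futures = [pool.submit(timed_worker, i, payload, send)
                   for i in range(thread)]
        return [f.result() for f in futures]

if __name__ == "__main__":
    payload = build_payload(b"\x00" * 1024, batch_size=2)  # dummy image bytes
    print(run_many(4, payload, send=len))  # stub sender: measures loop overhead only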
diff --git a/python/examples/pipeline/faster_rcnn/benchmark.sh b/python/examples/pipeline/faster_rcnn/benchmark.sh
deleted file mode 100644
index 7ff22a8b704c92a4e6de144a0bf7e652602b2a61..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/benchmark.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-export FLAGS_profile_pipeline=1
-alias python3="python3.6"
-modelname="bert"
-# HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-sleep 3
-python3 benchmark.py yaml local_predictor 1 cpu
-rm -rf profile_log_$modelname
-for thread_num in 1
-do
-    for batch_size in 1 2
-    do
-        echo "----FasterRCNN thread num: $thread_num batch size: $batch_size mode:http ----" >>profile_log_$modelname
-        rm -rf PipelineServingLogs
-        rm -rf cpu_utilization.py
-        python3 web_service.py >web.log 2>&1 &
-        sleep 3
-        nvidia-smi --id=2 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-        nvidia-smi --id=2 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
-        echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-        python3 benchmark.py run http $thread_num $batch_size
-        python3 cpu_utilization.py >>profile_log_$modelname
-        ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-        python3 benchmark.py dump benchmark.log benchmark.tmp
-        mv benchmark.tmp benchmark.log
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$modelname
-        cat benchmark.log >> profile_log_$modelname
-        #rm -rf gpu_use.log gpu_utilization.log
-    done
-done
-
-# RPC
-exit
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu
-
-for thread_num in 1 8 16
-do
-    for batch_size in 1 10 100
-    do
-        echo "----Bert thread num: $thread_num batch size: $batch_size mode:rpc ----" >>profile_log_$modelname
-        rm -rf PipelineServingLogs
-        rm -rf cpu_utilization.py
-        python3 web_service.py >web.log 2>&1 &
-        sleep 3
-        nvidia-smi --id=2 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-        nvidia-smi --id=2 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
-        echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-        python3 benchmark.py run rpc $thread_num $batch_size
-        python3 cpu_utilization.py >>profile_log_$modelname
-        ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-        python3 benchmark.py dump benchmark.log benchmark.tmp
-        mv benchmark.tmp benchmark.log
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$modelname
-        #rm -rf gpu_use.log gpu_utilization.log
-        cat benchmark.log >> profile_log_$modelname
-    done
-done
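Note: in the deleted benchmark.sh, nvidia-smi samples GPU memory and utilization into CSV logs every 100 ms, and the two awk one-liners reduce each log to its peak sample. A hypothetical Python equivalent (assuming the usual --format=csv output of one header row followed by rows such as "1024 MiB" or "37 %"; not part of the script itself) would be:

import re

def peak_from_nvidia_smi_csv(path):
    # Skip the CSV header, strip units ("MiB", "%"), keep the largest sample.
    peak = 0.0
    with open(path) as fin:
        for lineno, line in enumerate(fin):
            if lineno == 0:
                continue  # header row, e.g. "used_gpu_memory [MiB]"
            match = re.match(r"\s*([\d.]+)", line)
            if match:
                peak = max(peak, float(match.group(1)))
    return peak

# Usage, with the log names the script writes:
# print("MAX_GPU_MEMORY:", peak_from_nvidia_smi_csv("gpu_use.log"))
# print("GPU_UTILIZATION:", peak_from_nvidia_smi_csv("gpu_utilization.log"))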
diff --git a/python/examples/pipeline/faster_rcnn/config.yml b/python/examples/pipeline/faster_rcnn/config.yml
deleted file mode 100644
index 50f9298e03a6f9c7b859c561e2c2b7ce61f622c0..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/config.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-dag:
-    is_thread_op: false
-    tracer:
-        interval_s: 10
-http_port: 18082
-op:
-    faster_rcnn:
-        local_service_conf:
-            client_type: local_predictor
-            concurrency: 2
-            device_type: 1
-            devices: '2'
-            fetch_list:
-            - save_infer_model/scale_0.tmp_1
-            model_config: serving_server/
-rpc_port: 9998
-worker_num: 20
diff --git a/python/examples/pipeline/faster_rcnn/label_list.txt b/python/examples/pipeline/faster_rcnn/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/python/examples/pipeline/faster_rcnn/pipeline_http_client.py b/python/examples/pipeline/faster_rcnn/pipeline_http_client.py
deleted file mode 100644
index 7037afc2f328d4a348e108a6bbeba7a2a60032af..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/pipeline_http_client.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# from paddle_serving_server.pipeline import PipelineClient
-import numpy as np
-import requests
-import json
-import cv2
-import base64
-import os
-
-
-def cv2_to_base64(image):
-    return base64.b64encode(image).decode('utf8')
-
-
-url = "http://127.0.0.1:18082/faster_rcnn/prediction"
-with open(os.path.join(".", "000000570688.jpg"), 'rb') as file:
-    image_data1 = file.read()
-image = cv2_to_base64(image_data1)
-
-for i in range(1):
-    data = {"key": ["image"], "value": [image]}
-    r = requests.post(url=url, data=json.dumps(data))
-    print(r.json())
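Note: the deleted pipeline_http_client.py depends on 000000570688.jpg sitting next to it. The same request can be sketched without that file by synthesizing a test image with OpenCV; the endpoint and the key/value payload shape come from the files above, while the synthesized image is purely illustrative:

import base64
import json

import cv2
import numpy as np
import requests

# Synthesize a test image so the client does not depend on a JPEG on disk.
dummy = np.zeros((640, 640, 3), dtype=np.uint8)
cv2.rectangle(dummy, (100, 100), (300, 400), (0, 255, 0), -1)
ok, encoded = cv2.imencode(".jpg", dummy)
assert ok, "JPEG encoding failed"

payload = {
    "key": ["image"],
    "value": [base64.b64encode(encoded.tobytes()).decode("utf8")],
}
resp = requests.post(
    "http://127.0.0.1:18082/faster_rcnn/prediction",  # http_port from config.yml
    data=json.dumps(payload),
)
print(resp.json())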
diff --git a/python/examples/pipeline/faster_rcnn/web_service.py b/python/examples/pipeline/faster_rcnn/web_service.py
deleted file mode 100644
index 1f483a0e6d751ed150bc5038db7e3506b61ecbb5..0000000000000000000000000000000000000000
--- a/python/examples/pipeline/faster_rcnn/web_service.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-try:
-    from paddle_serving_server_gpu.web_service import WebService, Op
-except ImportError:
-    from paddle_serving_server.web_service import WebService, Op
-import logging
-import numpy as np
-import sys
-import cv2
-from paddle_serving_app.reader import *
-import base64
-
-
-class FasterRCNNOp(Op):
-    def init_op(self):
-        self.img_preprocess = Sequential([
-            BGR2RGB(), Div(255.0),
-            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
-            Resize((640, 640)), Transpose((2, 0, 1))
-        ])
-        self.img_postprocess = RCNNPostprocess("label_list.txt", "output")
-
-    def preprocess(self, input_dicts, data_id, log_id):
-        (_, input_dict), = input_dicts.items()
-        imgs = []
-        print("keys", input_dict.keys())
-        for key in input_dict.keys():
-            data = base64.b64decode(input_dict[key].encode('utf8'))
-            data = np.fromstring(data, np.uint8)
-            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-            im = self.img_preprocess(im)
-            imgs.append({
-                "image": im[np.newaxis,:],
-                "im_shape": np.array(list(im.shape[1:])).reshape(-1)[np.newaxis,:],
-                "scale_factor": np.array([1.0, 1.0]).reshape(-1)[np.newaxis,:],
-            })
-        feed_dict = {
-            "image": np.concatenate([x["image"] for x in imgs], axis=0),
-            "im_shape": np.concatenate([x["im_shape"] for x in imgs], axis=0),
-            "scale_factor": np.concatenate([x["scale_factor"] for x in imgs], axis=0)
-        }
-        for key in feed_dict.keys():
-            print(key, feed_dict[key].shape)
-        return feed_dict, False, None, ""
-
-    def postprocess(self, input_dicts, fetch_dict, log_id):
-        #print(fetch_dict)
-        res_dict = {"bbox_result": str(self.img_postprocess(fetch_dict))}
-        return res_dict, None, ""
-
-
-class FasterRCNNService(WebService):
-    def get_pipeline_response(self, read_op):
-        faster_rcnn_op = FasterRCNNOp(name="faster_rcnn", input_ops=[read_op])
-        return faster_rcnn_op
-
-
-fasterrcnn_service = FasterRCNNService(name="faster_rcnn")
-fasterrcnn_service.prepare_pipeline_config("config2.yml")
-fasterrcnn_service.run_service()
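Note: the heart of FasterRCNNOp.preprocess is decode-then-stack: each base64 string becomes an HWC uint8 image, is normalized and transposed to CHW, and the per-image tensors are concatenated along the batch axis together with matching im_shape and scale_factor rows. A standalone sketch of that step, using np.frombuffer (the non-deprecated spelling of the np.fromstring call above) and plain NumPy/OpenCV in place of paddle_serving_app's Sequential transforms:

import base64

import cv2
import numpy as np

MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def decode_and_preprocess(b64_str, size=(640, 640)):
    # base64 -> raw bytes -> BGR image -> normalized CHW float32 tensor,
    # in the same order as the op above: BGR2RGB, /255, normalize, resize.
    raw = base64.b64decode(b64_str.encode("utf8"))
    im = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    im = (im - MEAN) / STD
    im = cv2.resize(im, size)
    return im.transpose((2, 0, 1))  # HWC -> CHW

def batch_feed(b64_images):
    # Stack per-image tensors into the three inputs the detector expects.
    tensors = [decode_and_preprocess(s) for s in b64_images]
    n = len(tensors)
    return {
        "image": np.stack(tensors, axis=0),
        "im_shape": np.tile(np.array(tensors[0].shape[1:], np.float32), (n, 1)),
        "scale_factor": np.tile(np.array([1.0, 1.0], np.float32), (n, 1)),
    }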