# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
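
# benchmark_batch.py: multi-threaded batch-prediction benchmark for a
# Paddle Serving text-classification endpoint. Each thread feeds
# pre-tokenized samples (word ids plus a label) to the server in
# fixed-size batches and reports timing statistics.
#
# Usage (argument order taken from the __main__ block below):
#   python benchmark_batch.py <client_conf> <data_file> <thread_num> <batch_size>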

import sys
import time

from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner


def predict(thr_id, resource):
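    """Per-thread benchmark worker, run by MultiThreadRunner.

    Loads this thread's share of the input lines, sends them to the
    serving endpoint in batches of resource["batch_size"], and returns
    [prob, label_list, [summed_infer_time], [wall_clock_time]].
    """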
    client = Client()
    client.load_client_config(resource["conf_file"])
    client.connect(resource["server_endpoint"])
    thread_num = resource["thread_num"]
    file_list = resource["filelist"]
    line_id = 0
    prob = []
    label_list = []
    dataset = []
    for fn in file_list:
        with open(fn) as fin:
            for line in fin:
                # Shard input lines round-robin across the worker threads.
                if line_id % thread_num == thr_id - 1:
                    # The first field bounds the word-id slice; the last
                    # field is the label.
                    group = line.strip().split()
                    words = [int(x) for x in group[1:int(group[0])]]
                    label = [int(group[-1])]
                    feed = {"words": words, "label": label}
                    dataset.append(feed)
                line_id += 1

    start = time.time()
    fetch = ["acc", "cost", "prediction"]
    infer_time_list = []
    counter = 0
    feed_list = []
    for inst in dataset:
        counter += 1
        feed_list.append(inst)
        if counter == resource["batch_size"]:
            # profile=True asks the client to also return the measured
            # inference time for this batch.
            fetch_map_batch, infer_time = client.batch_predict(
                feed_batch=feed_list, fetch=fetch, profile=True)
            # Per-sample probability/label collection is disabled here;
            # prob and label_list stay empty in this benchmark.
            infer_time_list.append(infer_time)
            counter = 0
            feed_list = []
    # Flush the final, possibly partial, batch.
    if counter != 0:
        fetch_map_batch, infer_time = client.batch_predict(
            feed_batch=feed_list, fetch=fetch, profile=True)
        infer_time_list.append(infer_time)

    end = time.time()
    client.release()
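    # Each result field is a list so per-thread values can be merged and
    # averaged by the caller (see the __main__ block below).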
    return [prob, label_list, [sum(infer_time_list)], [end - start]]


if __name__ == '__main__':
    # Argument order: client config, data file, thread count, batch size.
    conf_file = sys.argv[1]
    data_file = sys.argv[2]
    thread_num = int(sys.argv[3])
    batch_size = int(sys.argv[4])

    resource = {
        "conf_file": conf_file,
        "server_endpoint": ["127.0.0.1:9292"],
        "filelist": [data_file],
        "thread_num": thread_num,
        "batch_size": batch_size,
    }

    thread_runner = MultiThreadRunner()
    result = thread_runner.run(predict, thread_num, resource)

    # Average per-thread wall-clock time.
    print("thread num {}\tbatch size {}\tavg total time {}".format(
        thread_num, batch_size, sum(result[-1]) / len(result[-1])))
    # Average per-thread inference time; the profiled counts appear to be
    # in microseconds, hence the division by 1000 * 1000.
    print("thread num {}\tbatch size {}\tavg infer time {}".format(
        thread_num, batch_size,
        sum(result[2]) / 1000.0 / 1000.0 / len(result[2])))
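
# Example invocation (the config and data paths below are illustrative,
# not part of this repository):
#   python benchmark_batch.py imdb_client_conf/serving_client_conf.prototxt \
#       test_data/part-0 4 16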