import sys
import os
import yaml
import requests
import time
import json
try:
    from paddle_serving_server_gpu.pipeline import PipelineClient
except ImportError:
    from paddle_serving_server.pipeline import PipelineClient
import numpy as np
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args, show_latency

def gen_yml():
    """Rewrite config.yml into config2.yml with DAG tracing enabled.

    Reads the pipeline server config, switches on the tracer (5 s report
    interval) so the server emits latency statistics during the benchmark,
    and dumps the result to config2.yml.
    """
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from untrusted input.
    with open("config.yml", "r") as fin:
        config = yaml.safe_load(fin)
    config["dag"]["tracer"] = {"interval_s": 5}
    with open("config2.yml", "w") as fout:
        yaml.dump(config, fout, default_flow_style=False)

def run_http(idx, batch_size):
    """HTTP benchmark worker: POST 1000 requests to the uci pipeline service.

    Args:
        idx: worker index assigned by MultiThreadRunner (logging only).
        batch_size: number of feature rows packed into one request.

    Returns:
        [[elapsed_seconds]] so MultiThreadRunner can aggregate latency.
    """
    print("start thread ({})".format(idx))
    url = "http://127.0.0.1:18082/uci/prediction"    
    start = time.time()
    value = "0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332"
    all_value = ";".join([value for i in range(batch_size)])
    data = {"key": ["x"], "value": [all_value]}
    # The payload is loop-invariant: serialize once instead of 1000 times.
    payload = json.dumps(data)
    r = None
    for i in range(1000):
        r = requests.post(url=url, data=payload)
    # Sanity-check only the last response; per-request latency comes from
    # the server-side tracer, not from here.
    print(r.json())
    end = time.time()
    return [[end - start]]

def multithread_http(thread, batch_size):
    """Fan run_http out over `thread` workers.

    Args:
        thread: number of concurrent worker threads.
        batch_size: rows per request, forwarded to each worker.

    Returns:
        The aggregated per-thread results from MultiThreadRunner
        (previously computed but discarded).
    """
    multi_thread_runner = MultiThreadRunner()
    result = multi_thread_runner.run(run_http, thread, batch_size)
    return result

def run_rpc(thread, batch_size):
    """RPC benchmark worker: send 1000 predict calls via PipelineClient.

    Args:
        thread: worker index passed by MultiThreadRunner (unused).
        batch_size: number of feature rows packed into one request.

    Returns:
        [[elapsed_seconds]], mirroring run_http so MultiThreadRunner can
        aggregate latency for the rpc mode too (the original returned None).
    """
    client = PipelineClient()
    client.connect(['127.0.0.1:9998'])
    start = time.time()
    value = "0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332"
    all_value = ";".join([value for i in range(batch_size)])
    data = {"key": "x", "value": all_value}
    ret = None
    for i in range(1000):
        ret = client.predict(feed_dict={data["key"]: data["value"]}, fetch=["res"])
    # Sanity-check only the last response.
    print(ret)
    end = time.time()
    return [[end - start]]

def multithread_rpc(thread, batch_size):
    """Fan run_rpc out over `thread` workers.

    Bug fix: the parameter was misspelled "thraed" while the body used
    "thread", so the argument was ignored and the call only worked via the
    accidental module-level global of the same name.

    Args:
        thread: number of concurrent worker threads.
        batch_size: rows per request, forwarded to each worker.

    Returns:
        The aggregated per-thread results from MultiThreadRunner.
    """
    multi_thread_runner = MultiThreadRunner()
    result = multi_thread_runner.run(run_rpc, thread, batch_size)
    return result

if __name__ == "__main__":
    # CLI:
    #   python benchmark.py yaml <mode> <thread>              -> write config2.yml
    #   python benchmark.py run  <http|rpc> <thread> <batch>  -> run the benchmark
    if sys.argv[1] == "yaml":
        mode = sys.argv[2]  # brpc / local predictor
        thread = int(sys.argv[3])
        gen_yml()
    elif sys.argv[1] == "run":
        mode = sys.argv[2]  # http / rpc
        thread = int(sys.argv[3])
        batch_size = int(sys.argv[4])
        if mode == "http":
            multithread_http(thread, batch_size)
        elif mode == "rpc":
            multithread_rpc(thread, batch_size)
        else:
            # Previously an unknown mode fell through silently.
            print("unknown run mode '{}', expected http or rpc".format(mode))
    else:
        # Previously an unknown subcommand fell through silently.
        print("usage: benchmark.py yaml <mode> <thread> | "
              "benchmark.py run <http|rpc> <thread> <batch_size>")