diff --git a/tools/benchmark.py b/tools/benchmark.py
index 53037b1e7187d0d36648fe61afdbba33271d079f..6a4ec29c8d3d741865f9271a18aa70de2ca1932d 100755
--- a/tools/benchmark.py
+++ b/tools/benchmark.py
@@ -206,8 +206,16 @@ def main(argv):
     hyper_hello_path = os.path.join(build_dir, "hyper_hello")
     core_http_bench_exe = os.path.join(build_dir, "deno_core_http_bench")
     new_data["throughput"] = run_throughput(deno_path)
-    new_data["req_per_sec"] = http_benchmark(deno_path, hyper_hello_path,
-                                             core_http_bench_exe)
+    stats = http_benchmark(deno_path, hyper_hello_path,
+                           core_http_bench_exe)
+    new_data["req_per_sec"] = {
+        k: v["req_per_sec"]
+        for k, v in stats.items()
+    }
+    new_data["max_latency"] = {
+        k: v["max_latency"]
+        for k, v in stats.items()
+    }
     if "linux" in sys.platform:
         # Thread count test, only on linux
         new_data["thread_count"] = run_thread_count_benchmark(deno_path)
diff --git a/tools/http_benchmark.py b/tools/http_benchmark.py
index 2df5fcb8914d5f5da0147680d4b8999118a7cf82..bd43a5ec435586978c14b21a40221fe024d9eaff 100755
--- a/tools/http_benchmark.py
+++ b/tools/http_benchmark.py
@@ -59,16 +59,18 @@ def hyper_http_benchmark(hyper_hello_exe):
     return run([hyper_hello_exe, ADDR.split(":")[1]])
 
 
 def http_benchmark(deno_exe, hyper_hello_exe, core_http_bench_exe):
-    r = {}
-    r["deno"] = deno_http_benchmark(deno_exe)
-    r["deno_net_http"] = deno_net_http_benchmark(deno_exe)
-    r["deno_core_single"] = deno_core_single(core_http_bench_exe)
-    r["deno_core_multi"] = deno_core_multi(core_http_bench_exe)
-    r["node"] = node_http_benchmark()
-    r["node_tcp"] = node_tcp_benchmark()
-    r["hyper"] = hyper_http_benchmark(hyper_hello_exe)
-    return r
+    # TODO Rename to "deno_tcp"
+
+    return {
+        "deno": deno_http_benchmark(deno_exe),
+        "deno_net_http": deno_net_http_benchmark(deno_exe),
+        "deno_core_single": deno_core_single(core_http_bench_exe),
+        "deno_core_multi": deno_core_multi(core_http_bench_exe),
+        "node": node_http_benchmark(),
+        "node_tcp": node_tcp_benchmark(),
+        "hyper": hyper_http_benchmark(hyper_hello_exe)
+    }
 
 
 def run(server_cmd, merge_env=None):
@@ -93,9 +95,9 @@ def run(server_cmd, merge_env=None):
                                                       DURATION, ADDR)
         print cmd
         output = subprocess.check_output(cmd, shell=True)
-        req_per_sec = util.parse_wrk_output(output)
+        stats = util.parse_wrk_output(output)
         print output
-        return req_per_sec
+        return stats
     finally:
         server.kill()
 
diff --git a/tools/testdata/wrk2.txt b/tools/testdata/wrk2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3be41437c8fa22e7345936788a562d4966a3e9be
--- /dev/null
+++ b/tools/testdata/wrk2.txt
@@ -0,0 +1,8 @@
+Running 10s test @ http://127.0.0.1:4544/
+  2 threads and 10 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency   402.90us    1.15ms    1.25us   94.86%
+    Req/Sec    26.86k     2.01k    31.81k    78.71%
+  539721 requests in 10.10s, 26.25MB read
+Requests/sec:  53435.75
+Transfer/sec:      2.60MB
\ No newline at end of file
diff --git a/tools/testdata/wrk3.txt b/tools/testdata/wrk3.txt
new file mode 100644
index 0000000000000000000000000000000000000000..71150f9f399cc2e86f6eb01c335ea4c08c9ccced
--- /dev/null
+++ b/tools/testdata/wrk3.txt
@@ -0,0 +1,8 @@
+Running 10s test @ http://127.0.0.1:4544/
+  2 threads and 10 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency    26.55ms  152.26ms   1.63s    97.45%
+    Req/Sec    48.26k     3.13k    61.41k    93.00%
+  960491 requests in 10.00s, 80.61MB read
+Requests/sec:  96037.58
+Transfer/sec:      8.06MB
\ No newline at end of file
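Note: the benchmark.py hunk above changes the shape of the recorded data. `http_benchmark` now returns one nested dict per server, and `main()` pivots it into a flat `{server: value}` dict per metric. A minimal sketch of that pivot, reusing expected values from the tests further down (the two-server dict itself is illustrative):

```python
# Sketch of the reshape in benchmark.py; sample values are taken from the
# wrk1/wrk3 test expectations below, the server keying is illustrative.
stats = {
    "deno": {"req_per_sec": 1837, "max_latency": 34.96},
    "hyper": {"req_per_sec": 96037, "max_latency": 1630.0},
}

new_data = {}
# One {server: value} dict per metric, so each chart on the website
# can consume a single flat mapping.
new_data["req_per_sec"] = {k: v["req_per_sec"] for k, v in stats.items()}
new_data["max_latency"] = {k: v["max_latency"] for k, v in stats.items()}

assert new_data["req_per_sec"]["hyper"] == 96037
assert new_data["max_latency"]["deno"] == 34.96
```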
diff --git a/tools/util.py b/tools/util.py
index fc307512d7866daeb4876895553350315466ed0f..5576bef91a41a22f856007d009cb660798ae1134 100644
--- a/tools/util.py
+++ b/tools/util.py
@@ -358,12 +358,32 @@ def extract_number(pattern, string):
     return int(matches[0])
 
 
+def extract_max_latency_in_milliseconds(pattern, string):
+    matches = re.findall(pattern, string)
+    if len(matches) != 1:
+        return None
+    num = float(matches[0][0])
+    unit = matches[0][1]
+    if (unit == 'ms'):
+        return num
+    elif (unit == 'us'):
+        return num / 1000
+    elif (unit == 's'):
+        return num * 1000
+
+
 def parse_wrk_output(output):
-    req_per_sec = None
+    stats = {}
+    stats['req_per_sec'] = None
+    stats['max_latency'] = None
     for line in output.split("\n"):
-        if req_per_sec is None:
-            req_per_sec = extract_number(r'Requests/sec:\s+(\d+)', line)
-    return req_per_sec
+        if stats['req_per_sec'] is None:
+            stats['req_per_sec'] = extract_number(r'Requests/sec:\s+(\d+)',
+                                                  line)
+        if stats['max_latency'] is None:
+            stats['max_latency'] = extract_max_latency_in_milliseconds(
+                r'Latency(?:\s+(\d+.\d+)([a-z]+)){3}', line)
+    return stats
 
 
 def platform():
diff --git a/tools/util_test.py b/tools/util_test.py
index b7c054b92ddfd3e8ab5e547d45aedb08eaf02e57..e4c8e697b5ef85d117e6c8d3d64d14283b58bfc0 100644
--- a/tools/util_test.py
+++ b/tools/util_test.py
@@ -82,8 +82,19 @@ def parse_unit_test_output_test():
 def parse_wrk_output_test():
     print "Testing util.parse_wrk_output_test()..."
     f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt"))
-    req_per_sec = util.parse_wrk_output(f.read())
-    assert req_per_sec == 1837
+    stats = util.parse_wrk_output(f.read())
+    assert stats['req_per_sec'] == 1837
+    assert stats['max_latency'] == 34.96
+
+    f2 = open(os.path.join(util.root_path, "tools/testdata/wrk2.txt"))
+    stats2 = util.parse_wrk_output(f2.read())
+    assert stats2['req_per_sec'] == 53435
+    assert stats2['max_latency'] == 0.00125
+
+    f3 = open(os.path.join(util.root_path, "tools/testdata/wrk3.txt"))
+    stats3 = util.parse_wrk_output(f3.read())
+    assert stats3['req_per_sec'] == 96037
+    assert stats3['max_latency'] == 1630.0
 
 
 def util_test():
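One subtlety in the new parsing code: `Latency(?:\s+(\d+.\d+)([a-z]+)){3}` repeats its capture groups three times, and Python's `re` module keeps only the captures from the last repetition, so `findall` returns exactly wrk's Max column while the Avg and Stdev columns are matched and discarded (the unescaped dot in `\d+.\d+` is loose but harmless here). A quick check with the latency line from wrk2.txt:

```python
import re

# wrk's thread-stats line: columns are Avg, Stdev, Max, +/- Stdev.
line = "    Latency   402.90us    1.15ms    1.25us   94.86%"

# The {3} repetition means groups 1 and 2 retain only their final
# (third) match, i.e. the Max value and its unit.
matches = re.findall(r'Latency(?:\s+(\d+.\d+)([a-z]+)){3}', line)
assert matches == [('1.25', 'us')]

# Same unit conversion as extract_max_latency_in_milliseconds:
num, unit = float(matches[0][0]), matches[0][1]
assert unit == 'us'
assert num / 1000 == 0.00125  # us -> ms, matching the wrk2.txt assert
```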
diff --git a/website/app.js b/website/app.js
index 6e23befa913edef6e3ef2c646226e3c7dadd87a2..c012dfb4a8446424c350ca71da6bc93e78a8e1ad 100644
--- a/website/app.js
+++ b/website/app.js
@@ -46,6 +46,10 @@ export function createReqPerSecColumns(data) {
   return createColumns(data, "req_per_sec");
 }
 
+export function createMaxLatencyColumns(data) {
+  return createColumns(data, "max_latency");
+}
+
 export function createBinarySizeColumns(data) {
   const propName = "binary_size";
   const binarySizeNames = Object.keys(data[data.length - 1][propName]);
@@ -198,6 +202,7 @@ export async function drawChartsFromBenchmarkData(dataUrl) {
   const execTimeColumns = createExecTimeColumns(data);
   const throughputColumns = createThroughputColumns(data);
   const reqPerSecColumns = createReqPerSecColumns(data);
+  const maxLatencyColumns = createMaxLatencyColumns(data);
   const binarySizeColumns = createBinarySizeColumns(data);
   const threadCountColumns = createThreadCountColumns(data);
   const syscallCountColumns = createSyscallCountColumns(data);
@@ -225,6 +230,7 @@ export async function drawChartsFromBenchmarkData(dataUrl) {
   gen("#exec-time-chart", execTimeColumns, "seconds", logScale);
   gen("#throughput-chart", throughputColumns, "seconds", logScale);
   gen("#req-per-sec-chart", reqPerSecColumns, "1000 req/sec", formatReqSec);
+  gen("#max-latency-chart", maxLatencyColumns, "milliseconds", logScale);
   gen("#binary-size-chart", binarySizeColumns, "megabytes", formatMB);
   gen("#thread-count-chart", threadCountColumns, "threads");
   gen("#syscall-count-chart", syscallCountColumns, "syscalls");
diff --git a/website/benchmarks.html b/website/benchmarks.html
index 3fd4765d80ca49039d97f072ae17e9bd9cae3f89..4d6d543dc6a85812459f083d1a0c4661b61baeff 100644
--- a/website/benchmarks.html
+++ b/website/benchmarks.html
@@ -110,6 +110,14 @@
       <div id="req-per-sec-chart"></div>
 
+      <h3 id="max-latency">Max Latency <a href="#max-latency">#</a></h3>
+
+      <p>
+        Max latency during the same test used above for requests/second.
+        Smaller is better.
+      </p>
+
+      <div id="max-latency-chart"></div>
 
       <h3 id="executable-size">Executable size <a href="#executable-size">#</a></h3>
 
       <p>deno ships only a single binary. We track its size here.</p>
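For completeness, a sketch of how the pieces fit together, assuming it is run from the repo root with the Python 2 interpreter these tools target (both assumptions of this sketch): each wrk run now yields the stats dict that `run()` returns and benchmark.py aggregates. The max_latency values in the testdata alone span roughly 0.00125 ms to 1630 ms, which is presumably why app.js draws the new chart with `logScale`.

```python
import sys

sys.path.insert(0, "tools")  # assumes we run from the deno repo root
import util

with open("tools/testdata/wrk3.txt") as f:
    stats = util.parse_wrk_output(f.read())

# One stats dict per wrk run; benchmark.py pivots these into
# new_data["req_per_sec"] and new_data["max_latency"] for the charts.
assert stats == {"req_per_sec": 96037, "max_latency": 1630.0}
```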