Commit f5e3c59c — PaddlePaddle/Serving

Authored Mar 13, 2020 by MRXLT

refine benchmark script and README

Parent: c3b0b213

Showing 21 changed files with 435 additions and 142 deletions (+435 / -142)
python/examples/bert/benchmark.py              +0   -7
python/examples/bert/benchmark.sh              +2   -7
python/examples/bert/benchmark_batch.py        +49  -46
python/examples/bert/benchmark_batch.sh        +7   -3
python/examples/criteo_ctr/README.md           +24  -1
python/examples/criteo_ctr/benchmark.py        +76  -0
python/examples/criteo_ctr/benchmark.sh        +9   -0
python/examples/criteo_ctr/benchmark_batch.py  +80  -0
python/examples/criteo_ctr/benchmark_batch.sh  +12  -0
python/examples/criteo_ctr/get_data.sh         +1   -1
python/examples/criteo_ctr/test_client.py      +7   -3
python/examples/imagenet/README.md             +12  -4
python/examples/imagenet/benchmark.sh          +9   -0
python/examples/imagenet/benchmark_batch.py    +71  -0
python/examples/imagenet/benchmark_batch.sh    +12  -0
python/examples/imagenet/get_model.sh          +2   -0
python/examples/imagenet/image_http_client.py  +1   -2
python/examples/imdb/benchmark.py              +1   -7
python/examples/imdb/benchmark.sh              +9   -0
python/examples/imdb/benchmark_batch.py        +39  -61
python/examples/imdb/benchmark_batch.sh        +12  -0
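All of the refactored and newly added RPC benchmark clients in this diff share one driver pattern: parse the common flags with benchmark_args(), run a single_func(idx, resource) worker under MultiThreadRunner, and average the per-thread wall time. A condensed sketch of that shared skeleton follows; build_feed_batch and fetch_names are placeholders for the model-specific parts (bert/criteo/imagenet/imdb each build their own feeds):

```
# Condensed skeleton of the benchmark clients in this commit.
# build_feed_batch / fetch_names are placeholders; every example below
# fills them in with its own reader and fetch variable names.
import time

from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner, benchmark_args

args = benchmark_args()  # --thread, --batch_size, --model, --request, ...
fetch_names = ["..."]  # model-specific fetch variables, e.g. ["pooled_output"]


def build_feed_batch(i, batch_size):
    # Placeholder: each example builds a list of feed dicts here,
    # e.g. [{"words": word_ids}] * batch_size for the imdb model.
    raise NotImplementedError


def single_func(idx, resource):
    client = Client()
    client.load_client_config(args.model)
    # round-robin the threads over the available endpoints
    client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
    start = time.time()
    for i in range(1000):
        feed_batch = build_feed_batch(i, args.batch_size)
        client.batch_predict(feed_batch=feed_batch, fetch=fetch_names)
    end = time.time()
    return [[end - start]]


if __name__ == '__main__':
    runner = MultiThreadRunner()
    result = runner.run(single_func, args.thread,
                        {"endpoint": ["127.0.0.1:9292"]})
    avg_cost = sum(result[0]) / args.thread
    print("average total cost {} s.".format(avg_cost))
```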
python/examples/bert/benchmark.py (view file @ f5e3c59c)

```
@@ -38,7 +38,6 @@ def single_func(idx, resource):
             dataset.append(line.strip())
     if args.request == "rpc":
         reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
-        config_file = './serving_client_conf/serving_client_conf.prototxt'
         fetch = ["pooled_output"]
         client = Client()
         client.load_client_config(args.model)
@@ -49,12 +48,6 @@ def single_func(idx, resource):
             if args.batch_size == 1:
                 feed_dict = reader.process(dataset[i])
                 result = client.predict(feed=feed_dict, fetch=fetch)
-            elif args.batch_size > 1:
-                feed_batch = []
-                for bi in range(args.batch_size):
-                    feed_batch.append(reader.process(dataset[i]))
-                result = client.batch_predict(
-                    feed_batch=feed_batch, fetch=fetch)
             else:
                 print("unsupport batch size {}".format(args.batch_size))
...
```
python/examples/bert/benchmark.sh (view file @ f5e3c59c)

```
 rm profile_log
-#for thread_num in 1 2 4 8 16
-for thread_num in 1 2
+for thread_num in 1 2 4 8 16
 do
-#for batch_size in 1 2 4 8 16 32 64 128 256 512
-for batch_size in 1 2
-do
-    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --batch_size $batch_size --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
+    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
     echo "========================================"
     echo "batch size : $batch_size" >> profile_log
     $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
     tail -n 1 profile >> profile_log
 done
-done
```
python/examples/bert/benchmark_batch.py (view file @ f5e3c59c)

```
+# -*- coding: utf-8 -*-
+#
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
...
@@ -11,61 +13,62 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing

 from __future__ import unicode_literals, absolute_import
 import os
 import sys
+import time
 from paddle_serving_client import Client
-from paddle_serving_client.metric import auc
 from paddle_serving_client.utils import MultiThreadRunner
-import time
-from bert_client import BertService
+from paddle_serving_client.utils import benchmark_args
 from batching import pad_batch_data
 import tokenization
 import requests
 import json
+from bert_reader import BertReader

+args = benchmark_args()

-def predict(thr_id, resource, batch_size):
-    bc = BertService(
-        model_name="bert_chinese_L-12_H-768_A-12",
-        max_seq_len=20,
-        do_lower_case=True)
-    bc.load_client(resource["conf_file"], resource["server_endpoint"])
-    thread_num = resource["thread_num"]
-    file_list = resource["filelist"]
-    line_id = 0
-    result = []
-    label_list = []
-    dataset = []
-    for fn in file_list:
-        fin = open(fn)
-        for line in fin:
-            if line_id % thread_num == thr_id - 1:
-                dataset.append(line.strip())
-            line_id += 1
-        fin.close()
-    start = time.time()
-    fetch = ["pooled_output"]
-    batch = []
-    for inst in dataset:
-        if len(batch) < batch_size:
-            batch.append([inst])
-        else:
-            fetch_map_batch = bc.run_batch_general(batch, fetch)
-            batch = []
-            result.append(fetch_map_batch)
-    end = time.time()
-    return [result, label_list, [end - start]]
+def single_func(idx, resource):
+    fin = open("data-c.txt")
+    dataset = []
+    for line in fin:
+        dataset.append(line.strip())
+    if args.request == "rpc":
+        reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
+        fetch = ["pooled_output"]
+        client = Client()
+        client.load_client_config(args.model)
+        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
+        start = time.time()
+        for i in range(1000):
+            if args.batch_size >= 1:
+                feed_batch = []
+                for bi in range(args.batch_size):
+                    feed_batch.append(reader.process(dataset[i]))
+                result = client.batch_predict(
+                    feed_batch=feed_batch, fetch=fetch)
+            else:
+                print("unsupport batch size {}".format(args.batch_size))
+    elif args.request == "http":
+        raise ("no batch predict for http")
+    end = time.time()
+    return [[end - start]]

 if __name__ == '__main__':
-    conf_file = sys.argv[1]
-    data_file = sys.argv[2]
-    thread_num = sys.argv[3]
-    batch_size = sys.ragv[4]
-    resource = {}
-    resource["conf_file"] = conf_file
-    resource["server_endpoint"] = ["127.0.0.1:9293"]
-    resource["filelist"] = [data_file]
-    resource["thread_num"] = int(thread_num)
-    thread_runner = MultiThreadRunner()
-    result = thread_runner.run(predict, int(sys.argv[3]), resource, batch_size)
-    print("total time {} s".format(sum(result[-1]) / len(result[-1])))
+    multi_thread_runner = MultiThreadRunner()
+    endpoint_list = ["127.0.0.1:9292"]
+    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
+    result = multi_thread_runner.run(single_func, args.thread,
+                                     {"endpoint": endpoint_list})
+    #result = single_func(0, {"endpoint": endpoint_list})
+    avg_cost = 0
+    for i in range(args.thread):
+        avg_cost += result[0][i]
+    avg_cost = avg_cost / args.thread
+    print("average total cost {} s.".format(avg_cost))
```
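The refactored clients all take their CLI from benchmark_args() in paddle_serving_client.utils. Its definition is not part of this diff; judging from the flags the shell scripts pass (--thread, --batch_size, --model, --request) and the attributes the clients read (args.endpoint), it is roughly equivalent to this hypothetical sketch:

```
# Hypothetical reconstruction of benchmark_args(); the real definition lives
# in paddle_serving_client.utils and is not shown in this diff.
import argparse


def benchmark_args():
    parser = argparse.ArgumentParser("benchmark")
    parser.add_argument("--thread", type=int, default=10)      # worker threads
    parser.add_argument("--model", type=str, default="")       # client prototxt
    parser.add_argument("--request", type=str, default="rpc")  # "rpc" or "http"
    parser.add_argument("--endpoint", type=str, default="127.0.0.1:9292")
    parser.add_argument("--batch_size", type=int, default=1)
    return parser.parse_args()
```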
python/examples/bert/benchmark_batch.sh (view file @ f5e3c59c)

```
 rm profile_log
-thread_num=1
-for batch_size in 1 4 8 16 32 64 128 256
+for thread_num in 1 2 4 8 16
 do
-    $PYTHONROOT/bin/python benchmark_batch.py serving_client_conf/serving_client_conf.prototxt data.txt $thread_num $batch_size > profile 2>&1
+for batch_size in 1 2 4 8 16 32 64 128 256 512
+do
+    $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
     echo "========================================"
     echo "batch size : $batch_size" >> profile_log
     $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
     tail -n 1 profile >> profile_log
+done
 done
```
python/examples/criteo_ctr/README.md (view file @ f5e3c59c)

# CTR task on Criteo Dataset

## CTR prediction service

### Get the sample data
```
sh get_data.sh
```

### Save the model and config files
```
python local_train.py
```
Running this script generates the serving_server_model and serving_client_config directories under the current directory.

### Start the RPC prediction service
```
python -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292
```

### Run prediction
```
python test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/
```
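This commit also adds benchmark.py and benchmark_batch.py to this example (shown below). The feed they send maps the 26 sparse feature slots of each Criteo sample to the inputs sparse_0 ... sparse_25. A condensed sketch of one request, assuming a reader from dataset.infer_reader(...) and an already connected client, exactly as those scripts set them up:

```
# One CTR request, condensed from the new benchmark.py below; assumes
# `reader` from dataset.infer_reader(...) and a connected `client`.
data = reader().next()  # one sample; data[0][1:27] are the sparse slots
feed_dict = {}
for i in range(1, 27):
    feed_dict["sparse_{}".format(i - 1)] = data[0][i]
fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
```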
python/examples/criteo_ctr/benchmark.py (new file, mode 100644, view @ f5e3c59c)

```
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from __future__ import unicode_literals, absolute_import
import os
import sys
import time
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
import requests
import json
import criteo_reader as criteo

args = benchmark_args()


def single_func(idx, resource):
    batch = 1
    buf_size = 100
    dataset = criteo.CriteoDataset()
    dataset.setup(1000001)
    test_filelists = [
        "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
    ]
    reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
                                  batch, buf_size)
    if args.request == "rpc":
        fetch = ["prob"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        for i in range(1000):
            if args.batch_size == 1:
                data = reader().next()
                feed_dict = {}
                for i in range(1, 27):
                    feed_dict["sparse_{}".format(i - 1)] = data[0][i]
                result = client.predict(feed=feed_dict, fetch=fetch)
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        raise ("Not support http service.")
    end = time.time()
    return [[end - start]]


if __name__ == '__main__':
    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9292"]
    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
    result = multi_thread_runner.run(single_func, args.thread,
                                     {"endpoint": endpoint_list})
    #result = single_func(0, {"endpoint": endpoint_list})
    avg_cost = 0
    for i in range(args.thread):
        avg_cost += result[0][i]
    avg_cost = avg_cost / args.thread
    print("average total cost {} s.".format(avg_cost))
```
python/examples/criteo_ctr/benchmark.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --model ctr_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
```
python/examples/criteo_ctr/benchmark_batch.py (new file, mode 100644, view @ f5e3c59c)

```
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from __future__ import unicode_literals, absolute_import
import os
import sys
import time
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
import requests
import json
import criteo_reader as criteo

args = benchmark_args()


def single_func(idx, resource):
    batch = 1
    buf_size = 100
    dataset = criteo.CriteoDataset()
    dataset.setup(1000001)
    test_filelists = [
        "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
    ]
    reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
                                  batch, buf_size)
    if args.request == "rpc":
        fetch = ["prob"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        for i in range(1000):
            if args.batch_size >= 1:
                feed_batch = []
                for bi in range(args.batch_size):
                    feed_dict = {}
                    data = reader().next()
                    for i in range(1, 27):
                        feed_dict["sparse_{}".format(i - 1)] = data[0][i]
                    feed_batch.append(feed_dict)
                result = client.batch_predict(
                    feed_batch=feed_batch, fetch=fetch)
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        raise ("no batch predict for http")
    end = time.time()
    return [[end - start]]


if __name__ == '__main__':
    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9292"]
    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
    result = multi_thread_runner.run(single_func, args.thread,
                                     {"endpoint": endpoint_list})
    #result = single_func(0, {"endpoint": endpoint_list})
    avg_cost = 0
    for i in range(args.thread):
        avg_cost += result[0][i]
    avg_cost = avg_cost / args.thread
    print("average total cost {} s.".format(avg_cost))
```
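One portability note on the criteo clients added above: reader().next() is Python 2 generator syntax, and calling reader() inside the loop also re-creates the reader on every sample. A Python 3 compatible equivalent, assuming the same infer_reader API:

```
# Python 3 compatible sampling, assuming dataset.infer_reader(...) as above.
batch_reader = reader()    # instantiate the generator once, outside the loop
data = next(batch_reader)  # the next() builtin works on both Python 2 and 3
```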
python/examples/criteo_ctr/benchmark_batch.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
for batch_size in 1 2 4 8 16 32 64 128 256 512
do
    $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
done
```
python/examples/criteo_ctr/get_data.sh (view file @ f5e3c59c)

```
 wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/ctr_prediction/ctr_data.tar.gz
-tar -zxvf *ctr_data.tar.gz
+tar -zxvf ctr_data.tar.gz
```
python/examples/criteo_ctr/test_client.py (view file @ f5e3c59c)

```
...
@@ -17,6 +17,7 @@ from paddle_serving_client import Client
 import paddle
 import sys
 import os
+import time
 import criteo_reader as criteo
 from paddle_serving_client.metric import auc
...
@@ -34,12 +35,15 @@ test_filelists = [
 ]
 reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
                               batch, buf_size)
 label_list = []
 prob_list = []
-for data in reader():
+start = time.time()
+for ei in range(1000):
+    data = reader().next()
     feed_dict = {}
     for i in range(1, 27):
         feed_dict["sparse_{}".format(i - 1)] = data[0][i]
     fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
-    print(fetch_map)
+    #print(fetch_map)
+end = time.time()
+print(end - start)
```
python/examples/imagenet/README.md (view file @ f5e3c59c)

@@ -6,22 +6,30 @@
 ```
 sh get_model.sh
 ```
-### Run the wb service prediction service
+### Run the HTTP prediction service
 Start the server:
 ```
-python image_classification_service.py conf_and_model/serving_server_model workdir 9393
+python image_classification_service.py ResNet50_vd_model workdir 9393  # CPU prediction service
+```
+```
+python image_classification_service_gpu.py ResNet50_vd_model workdir 9393  # GPU prediction service
 ```
 Run prediction from the client:
 ```
 python image_http_client.py
 ```
-### Run the rpc service prediction service
+### Run the RPC prediction service
 Start the server:
 ```
-python -m paddle_serving_server.serve --model conf_and_model/serving_server_model/ --port 9393
+python -m paddle_serving_server.serve --model ResNet50_vd_model --port 9393  # CPU prediction service
+```
+```
+python -m paddle_serving_server_gpu.serve --model ResNet50_vd_model --port 9393 --gpu_ids 0  # GPU prediction service
 ```
 Run prediction from the client:
...
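For reference, the RPC path exercised by the new imagenet benchmark builds its feed from a raw JPEG via ImageReader. A condensed sketch of one request, with all names taken from benchmark_batch.py below:

```
# One RPC image request, condensed from the new imagenet benchmark client.
from paddle_serving_client import Client
from image_reader import ImageReader  # local helper in this example directory

client = Client()
client.load_client_config(
    "ResNet101_vd_client_config/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

reader = ImageReader()
with open("./data/n01440764_10026.JPEG") as f:
    raw_img = f.read()
img = reader.process_image(raw_img).reshape(-1)  # preprocess and flatten
fetch_map = client.batch_predict(feed_batch=[{"image": img}], fetch=["score"])
```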
python/examples/imagenet/benchmark.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --model ResNet101_vd_client_config/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
```
python/examples/imagenet/benchmark_batch.py (new file, mode 100644, view @ f5e3c59c)

```
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from __future__ import unicode_literals, absolute_import
import os
import sys
import time
from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
from paddle_serving_client.utils import benchmark_args
import requests
import json
from image_reader import ImageReader

args = benchmark_args()


def single_func(idx, resource):
    if args.request == "rpc":
        reader = ImageReader()
        fetch = ["score"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        with open("./data/n01440764_10026.JPEG") as f:
            raw_img = f.read()
        for i in range(1000):
            if args.batch_size >= 1:
                feed_batch = []
                for bi in range(args.batch_size):
                    img = reader.process_image(raw_img)
                    img = img.reshape(-1)
                    feed_batch.append({"image": img})
                result = client.batch_predict(
                    feed_batch=feed_batch, fetch=fetch)
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        raise ("no batch predict for http")
    end = time.time()
    return [[end - start]]


if __name__ == '__main__':
    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9393"]
    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
    result = multi_thread_runner.run(single_func, args.thread,
                                     {"endpoint": endpoint_list})
    #result = single_func(0, {"endpoint": endpoint_list})
    avg_cost = 0
    for i in range(args.thread):
        avg_cost += result[0][i]
    avg_cost = avg_cost / args.thread
    print("average total cost {} s.".format(avg_cost))
```
python/examples/imagenet/benchmark_batch.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
for batch_size in 1 2 4 8 16 32 64 128 256 512
do
    $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model ResNet101_vd_client_config/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
done
```
python/examples/imagenet/get_model.sh (view file @ f5e3c59c)

```
 wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/conf_and_model.tar.gz
 tar -xzvf conf_and_model.tar.gz
+wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/ResNet101_vd.tar.gz
+tar -xzvf ResNet101_vd.tar.gz
```
python/examples/imagenet/image_http_client.py (view file @ f5e3c59c)

```
...
@@ -26,11 +26,10 @@ def predict(image_path, server):

 if __name__ == "__main__":
-    server = "http://127.0.0.1:9292/image/prediction"
+    server = "http://127.0.0.1:9393/image/prediction"
     image_path = "./data/n01440764_10026.JPEG"
     start = time.time()
     for i in range(1000):
         predict(image_path, server)
         print(i)
     end = time.time()
     print(end - start)
```
python/examples/imdb/benchmark.py (view file @ f5e3c59c)

```
...
@@ -37,16 +37,10 @@ def single_func(idx, resource):
     client.load_client_config(args.model)
     client.connect([args.endpoint])
     for i in range(1000):
-        word_ids, label = imdb_dataset.get_words_and_label(line)
         if args.batch_size == 1:
+            word_ids, label = imdb_dataset.get_words_and_label(line)
             fetch_map = client.predict(
                 feed={"words": word_ids}, fetch=["prediction"])
-        elif args.batch_size > 1:
-            feed_batch = []
-            for bi in range(args.batch_size):
-                feed_batch.append({"words": word_ids})
-            result = client.batch_predict(
-                feed_batch=feed_batch, fetch=["prediction"])
         else:
             print("unsupport batch size {}".format(args.batch_size))
...
```
python/examples/imdb/benchmark.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
    $PYTHONROOT/bin/python benchmark.py --thread $thread_num --model imdbo_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
```
python/examples/imdb/benchmark_batch.py (view file @ f5e3c59c)

```
...
@@ -11,77 +11,55 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# pylint: disable=doc-string-missing

 import sys
+import time
+import requests
+from imdb_reader import IMDBDataset
 from paddle_serving_client import Client
-from paddle_serving_client.metric import auc
 from paddle_serving_client.utils import MultiThreadRunner
-import time
+from paddle_serving_client.utils import benchmark_args

+args = benchmark_args()

-def predict(thr_id, resource):
-    client = Client()
-    client.load_client_config(resource["conf_file"])
-    client.connect(resource["server_endpoint"])
-    thread_num = resource["thread_num"]
-    file_list = resource["filelist"]
-    line_id = 0
-    prob = []
-    label_list = []
-    dataset = []
-    for fn in file_list:
-        fin = open(fn)
-        for line in fin:
-            if line_id % thread_num == thr_id - 1:
-                group = line.strip().split()
-                words = [int(x) for x in group[1:int(group[0])]]
-                label = [int(group[-1])]
-                feed = {"words": words, "label": label}
-                dataset.append(feed)
-            line_id += 1
-        fin.close()
-    start = time.time()
-    fetch = ["acc", "cost", "prediction"]
-    infer_time_list = []
-    counter = 0
-    feed_list = []
-    for inst in dataset:
-        counter += 1
-        feed_list.append(inst)
-        if counter == resource["batch_size"]:
-            fetch_map_batch, infer_time = client.batch_predict(
-                feed_batch=feed_list, fetch=fetch, profile=True)
-            #prob.append(fetch_map["prediction"][1])
-            #label_list.append(label[0])
-            infer_time_list.append(infer_time)
-            counter = 0
-            feed_list = []
-    if counter != 0:
-        fetch_map_batch, infer_time = client.batch_predict(
-            feed_batch=feed_list, fetch=fetch, profile=True)
-        infer_time_list.append(infer_time)
-    end = time.time()
-    client.release()
-    return [prob, label_list, [sum(infer_time_list)], [end - start]]
+def single_func(idx, resource):
+    imdb_dataset = IMDBDataset()
+    imdb_dataset.load_resource("./imdb.vocab")
+    dataset = []
+    with open("./test_data/part-0") as fin:
+        for line in fin:
+            dataset.append(line.strip())
+    start = time.time()
+    if args.request == "rpc":
+        client = Client()
+        client.load_client_config(args.model)
+        client.connect([args.endpoint])
+        for i in range(1000):
+            if args.batch_size >= 1:
+                feed_batch = []
+                for bi in range(args.batch_size):
+                    word_ids, label = imdb_dataset.get_words_and_label(line)
+                    feed_batch.append({"words": word_ids})
+                result = client.batch_predict(
+                    feed_batch=feed_batch, fetch=["prediction"])
+            else:
+                print("unsupport batch size {}".format(args.batch_size))
+    elif args.request == "http":
+        for fn in filelist:
+            fin = open(fn)
+            for line in fin:
+                word_ids, label = imdb_dataset.get_words_and_label(line)
+                r = requests.post(
+                    "http://{}/imdb/prediction".format(args.endpoint),
+                    data={"words": word_ids, "fetch": ["prediction"]})
+    end = time.time()
+    return [[end - start]]

 if __name__ == '__main__':
-    conf_file = sys.argv[1]
-    data_file = sys.argv[2]
-    resource = {}
-    resource["conf_file"] = conf_file
-    resource["server_endpoint"] = ["127.0.0.1:9292"]
-    resource["filelist"] = [data_file]
-    resource["thread_num"] = int(sys.argv[3])
-    resource["batch_size"] = int(sys.argv[4])
-    thread_runner = MultiThreadRunner()
-    result = thread_runner.run(predict, int(sys.argv[3]), resource)
-    print("thread num {}\tbatch size {}\ttotal time {}".format(
-        sys.argv[3], resource["batch_size"],
-        sum(result[-1]) / len(result[-1])))
-    print("thread num {}\tbatch size {}\tinfer time {}".format(
-        sys.argv[3], resource["batch_size"],
-        sum(result[2]) / 1000.0 / 1000.0 / len(result[2])))
+    multi_thread_runner = MultiThreadRunner()
+    result = multi_thread_runner.run(single_func, args.thread, {})
+    print(result)
```
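The IMDB feed is just a list of word ids. A minimal sketch of how one raw line becomes a single request, using the helpers referenced above; the data-line format (length-prefixed word ids with a trailing label) follows the parsing in the removed predict(), and the client config path is the one the benchmark scripts in this commit pass via --model:

```
# One IMDB sample -> one RPC request, using helpers from this example.
from imdb_reader import IMDBDataset
from paddle_serving_client import Client

imdb_dataset = IMDBDataset()
imdb_dataset.load_resource("./imdb.vocab")  # vocab used to build word ids

client = Client()
# client config path as used by benchmark.sh in this commit
client.load_client_config("imdbo_bow_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

with open("./test_data/part-0") as fin:
    line = fin.readline()
word_ids, label = imdb_dataset.get_words_and_label(line)
fetch_map = client.predict(feed={"words": word_ids}, fetch=["prediction"])
```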
python/examples/imdb/benchmark_batch.sh (new file, mode 100644, view @ f5e3c59c)

```
rm profile_log
for thread_num in 1 2 4 8 16
do
for batch_size in 1 2 4 8 16 32 64 128 256 512
do
    $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model imdbo_bow_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
    echo "========================================"
    echo "batch size : $batch_size" >> profile_log
    $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
    tail -n 1 profile >> profile_log
done
done
```