Commit a4c7141b authored by wangjiawei04

fix

Parent 5d0abdf1
@@ -6,9 +6,9 @@ ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
 python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+for thread_num in 1 8 16
 do
-for batch_size in 20
+for batch_size in 1 10 100
 do
 echo "----Bert thread num: $thread_num batch size: $batch_size mode:http ----" >>profile_log_$modelname
 rm -rf PipelineServingLogs
@@ -34,9 +34,9 @@ ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
 python3 benchmark.py yaml local_predictor 1 gpu
-for thread_num in 1
+for thread_num in 1 8 16
 do
-for batch_size in 20
+for batch_size in 1 10 100
 do
 echo "----Bert thread num: $thread_num batch size: $batch_size mode:rpc ----" >>profile_log_$modelname
 rm -rf PipelineServingLogs
......
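The two hunks above widen the BERT pipeline benchmark from a single point (thread_num=1, batch_size=20) to a grid of thread counts {1, 8, 16} and batch sizes {1, 10, 100}, for both the HTTP and RPC runs. For readers who prefer to drive such a sweep from Python instead of nested shell loops, here is a minimal sketch; run_once and its subprocess argument list are assumptions made for illustration, not the repository's actual benchmark.py CLI:

    import itertools
    import subprocess

    THREAD_NUMS = [1, 8, 16]
    BATCH_SIZES = [1, 10, 100]

    def run_once(thread_num, batch_size, mode):
        """Hypothetical wrapper: write the same header line the shell script
        writes, then invoke the benchmark client for one grid point."""
        with open("profile_log_bert", "a") as log:
            log.write("----Bert thread num: %d batch size: %d mode:%s ----\n"
                      % (thread_num, batch_size, mode))
        # The argument list below is an assumption, not taken from the diff.
        subprocess.run(["python3", "benchmark.py", "run", mode,
                        str(thread_num), str(batch_size)], check=False)

    for t, b in itertools.product(THREAD_NUMS, BATCH_SIZES):
        run_once(t, b, "http")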
@@ -14,4 +14,4 @@ op:
       - pooled_output
     model_config: bert_seq128_model/
 rpc_port: 9998
-worker_num: 1
+worker_num: 20
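The BERT pipeline server config raises worker_num from 1 to 20, presumably so that server-side concurrency keeps pace with the wider client sweep above. A minimal sketch of reading these two top-level fields back out of the config (PyYAML assumed; the file name is an assumption, the key names come from the hunk):

    import yaml

    # "config.yml" is assumed; the diff does not show the file's path.
    with open("config.yml") as f:
        conf = yaml.safe_load(f)

    print("rpc_port:", conf["rpc_port"])      # 9998
    print("worker_num:", conf["worker_num"])  # 20 after this commit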
@@ -6,7 +6,7 @@ ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
 python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+for thread_num in 1 8 16
 do
 for batch_size in 1
 do
@@ -34,7 +34,7 @@ ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
 python3 benchmark.py yaml local_predictor 1 gpu
-for thread_num in 1
+for thread_num in 1 8 16
 do
 for batch_size in 1
 do
......
@@ -6,7 +6,7 @@ http_port: 9999
 #worker_num, the maximum concurrency. When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG
 ##When build_dag_each_worker=False, the framework sets max_workers=worker_num on the gRPC thread pool of the main thread
-worker_num: 1
+worker_num: 5
 #build_dag_each_worker. False: the framework builds a single DAG inside the process; True: the framework creates multiple worker processes, each with an independent DAG
 build_dag_each_worker: false
@@ -20,6 +20,9 @@ dag:
 #Enable profiling. True: generate Timeline performance data (with some impact on performance); False: disabled
 use_profile: false
+tracer:
+    interval_s: 10
 op:
     det:
 #Concurrency. When is_thread_op=True this is thread-level concurrency; otherwise process-level concurrency
@@ -37,7 +40,7 @@ op:
 fetch_list: ["concat_1.tmp_0"]
 #Compute device IDs. When devices is "" or unset, inference runs on CPU; when devices is "0" or "0,1,2", inference runs on GPU, indicating which GPU cards to use
-devices: "0"
+devices: "2"
 rec:
 #Concurrency. When is_thread_op=True this is thread-level concurrency; otherwise process-level concurrency
 concurrency: 2
@@ -61,4 +64,4 @@ op:
 fetch_list: ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
 #Compute device IDs. When devices is "" or unset, inference runs on CPU; when devices is "0" or "0,1,2", inference runs on GPU, indicating which GPU cards to use
-devices: "0"
+devices: "2"
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 try:
-    from paddle_serving_server.web_service import WebService, Op
-except ImportError:
     from paddle_serving_server_gpu.web_service import WebService, Op
+except ImportError:
+    from paddle_serving_server.web_service import WebService, Op
 import logging
 import numpy as np
 import cv2
......
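The final hunk flips the import order in web_service.py: the script now prefers the GPU build (paddle_serving_server_gpu) and falls back to the CPU build only if the GPU package is not installed. The same fallback pattern, with an extra flag added purely for illustration (USING_GPU_BUILD is not in the original file), could be written as:

    import logging

    try:
        # Prefer the GPU build of Paddle Serving when it is installed.
        from paddle_serving_server_gpu.web_service import WebService, Op
        USING_GPU_BUILD = True
    except ImportError:
        # Fall back to the CPU build.
        from paddle_serving_server.web_service import WebService, Op
        USING_GPU_BUILD = False

    logging.info("Using the %s build of Paddle Serving",
                 "GPU" if USING_GPU_BUILD else "CPU")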