Commit 311a3cd5 authored by MRXLT

fix conflict

doc/demo.gif (binary image changed: 270.6 KB → 418.8 KB)
@@ -68,10 +68,8 @@ def single_func(idx, resource):
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
     endpoint_list = ["127.0.0.1:9292"]
-    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
     result = multi_thread_runner.run(single_func, args.thread,
                                      {"endpoint": endpoint_list})
-    #result = single_func(0, {"endpoint": endpoint_list})
     avg_cost = 0
     for i in range(args.thread):
         avg_cost += result[0][i]
@@ -27,7 +27,6 @@ import tokenization
 import requests
 import json
 from bert_reader import BertReader
 args = benchmark_args()
@@ -63,10 +62,8 @@ def single_func(idx, resource):
 if __name__ == '__main__':
     multi_thread_runner = MultiThreadRunner()
     endpoint_list = ["127.0.0.1:9292"]
-    #endpoint_list = endpoint_list + endpoint_list + endpoint_list
     result = multi_thread_runner.run(single_func, args.thread,
                                      {"endpoint": endpoint_list})
-    #result = single_func(0, {"endpoint": endpoint_list})
     avg_cost = 0
     for i in range(args.thread):
         avg_cost += result[0][i]
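Both benchmark hunks drop the commented-out single-threaded variants and keep only the multi-threaded run. For reference, a minimal sketch of the aggregation pattern these scripts rely on, with MultiThreadRunner stood in by a plain thread pool (hypothetical helper names; only the result shape result[0][i] is taken from the hunks above):

from concurrent.futures import ThreadPoolExecutor

def run_workers(func, thread_num, resource):
    # Stand-in for MultiThreadRunner.run: one latency value per worker thread.
    with ThreadPoolExecutor(max_workers=thread_num) as pool:
        futures = [pool.submit(func, i, resource) for i in range(thread_num)]
    return [[f.result() for f in futures]]  # shape matches result[0][i]

def average_cost(result, thread_num):
    # Mirrors the loop above: sum per-thread costs, then divide by thread count.
    return sum(result[0][i] for i in range(thread_num)) / thread_num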
# coding:utf-8
# pylint: disable=doc-string-missing
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import paddlehub as hub
import ujson
import random
import time
from paddlehub.common.logger import logger
import socket
from paddle_serving_client import Client
from paddle_serving_client.utils import benchmark_args
from bert_reader import BertReader  # used below but missing from the original imports

args = benchmark_args()

fin = open("data-c.txt")
reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
fetch = ["pooled_output"]
endpoint_list = ["127.0.0.1:9494"]
client = Client()
client.load_client_config(args.model)
client.connect(endpoint_list)

for line in fin:
    feed_dict = reader.process(line)
    result = client.predict(feed=feed_dict, fetch=fetch)
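The client script imports time but never uses it; if per-request latency is wanted from this loop, a minimal sketch (reusing fin, reader, client, and fetch from the script above):

import time

total_cost, count = 0.0, 0
for line in fin:
    feed_dict = reader.process(line)
    start = time.time()
    result = client.predict(feed=feed_dict, fetch=fetch)
    total_cost += time.time() - start
    count += 1
if count > 0:
    print("average latency: {:.4f}s over {} requests".format(total_cost / count, count))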
@@ -256,7 +256,7 @@ class Server(object):
     def port_is_available(self, port):
         with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
             sock.settimeout(2)
-            result = sock.connect_ex(('127.0.0.1', port))
+            result = sock.connect_ex(('0.0.0.0', port))
         if result != 0:
             return True
         else:
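connect_ex returns 0 only when something already accepts connections on the port, so a nonzero result is read as "port free". The same check as a standalone helper (hypothetical name, not part of the diff):

import socket
from contextlib import closing

def port_is_available(port, host='0.0.0.0'):
    # connect_ex returns 0 on a successful connection, i.e. the port is taken.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(2)
        return sock.connect_ex((host, port)) != 0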
@@ -54,7 +54,7 @@ class WebService(object):
         client_service = Client()
         client_service.load_client_config(
             "{}/serving_server_conf.prototxt".format(self.model_config))
-        client_service.connect(["127.0.0.1:{}".format(self.port + 1)])
+        client_service.connect(["0.0.0.0:{}".format(self.port + 1)])
         service_name = "/" + self.name + "/prediction"

         @app_instance.route(service_name, methods=['POST'])
@@ -266,7 +266,7 @@ class Server(object):
     def port_is_available(self, port):
         with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
             sock.settimeout(2)
-            result = sock.connect_ex(('127.0.0.1', port))
+            result = sock.connect_ex(('0.0.0.0', port))
         if result != 0:
             return True
         else:
@@ -17,8 +17,8 @@ Usage:
 Example:
     python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
 """
-import os
 import argparse
+import os
 from multiprocessing import Pool, Process
 from paddle_serving_server_gpu import serve_args
@@ -64,12 +64,14 @@ def start_gpu_card_model(gpuid, args):  # pylint: disable=doc-string-missing
 def start_multi_card(args):  # pylint: disable=doc-string-missing
     gpus = ""
     if args.gpu_ids == "":
+        import os
+        if "CUDA_VISIBLE_DEVICES" in os.environ:
+            gpus = os.environ["CUDA_VISIBLE_DEVICES"]
+        else:
             gpus = []
     else:
         gpus = args.gpu_ids.split(",")
     if len(gpus) <= 0:
-        start_gpu_card_model(-1)
+        start_gpu_card_model(-1, args)
     else:
         gpu_processes = []
         for i, gpu_id in enumerate(gpus):
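The added guard avoids a KeyError when CUDA_VISIBLE_DEVICES is unset, and the CPU path now passes args through. The same resolution order as a standalone sketch (hypothetical helper name):

import os

def resolve_gpu_ids(gpu_ids_arg):
    # Prefer an explicit --gpu_ids value; otherwise fall back to
    # CUDA_VISIBLE_DEVICES; an empty result means the CPU (-1) path above.
    if gpu_ids_arg:
        return gpu_ids_arg.split(",")
    env = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    return env.split(",") if env else []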
@@ -95,7 +95,7 @@ class WebService(object):
             client_service = Client()
             client_service.load_client_config(
                 "{}/serving_server_conf.prototxt".format(self.model_config))
-            client_service.connect(["127.0.0.1:{}".format(self.port + i + 1)])
+            client_service.connect(["0.0.0.0:{}".format(self.port + i + 1)])
             client_list.append(client_service)
         time.sleep(1)
         service_name = "/" + self.name + "/prediction"
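Each GPU card gets its own RPC endpoint at self.port + i + 1, with the web service itself on self.port. A sketch of that port layout (assumed values, two cards on base port 9292):

base_port = 9292  # HTTP port of the web service
num_cards = 2
rpc_ports = [base_port + i + 1 for i in range(num_cards)]
endpoints = ["0.0.0.0:{}".format(p) for p in rpc_ports]
print(endpoints)  # ['0.0.0.0:9293', '0.0.0.0:9294'] -- one endpoint per card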
@@ -80,7 +80,7 @@ class BertServer():
     def build_server(self):
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        sock.bind(('127.0.0.1', self.port))
+        sock.bind(('0.0.0.0', self.port))
         sock.listen(5)
         print('Main server serving on {} port.'.format(self.port))
         while True:
@@ -122,7 +122,7 @@ class BertServer():
             with closing(socket.socket(socket.AF_INET,
                                        socket.SOCK_STREAM)) as sock:
                 sock.settimeout(2)
-                result = sock.connect_ex(('127.0.0.1', port))
+                result = sock.connect_ex(('0.0.0.0', port))
             if result != 0:
                 return port
         return -1
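Binding to 0.0.0.0 instead of 127.0.0.1 makes the main server reachable from other hosts rather than loopback only. A minimal sketch of the same bind/listen/accept pattern (assumed port 9494):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow fast restarts
sock.bind(('0.0.0.0', 9494))  # listen on all interfaces, not just loopback
sock.listen(5)                # backlog of five pending connections
conn, addr = sock.accept()    # blocks until a client connects
print('Connection from {}.'.format(addr))
conn.close()
sock.close()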