diff --git a/doc/DESIGN_DOC_CN.md b/doc/DESIGN_DOC_CN.md
index c068ac35bb6beebe70a6f873318c6d5059fc51e7..7b6e237f787c12a7201898ee9403a6467473ef8c 100644
--- a/doc/DESIGN_DOC_CN.md
+++ b/doc/DESIGN_DOC_CN.md
@@ -26,7 +26,7 @@ serving_io.save_model("serving_model", "client_conf",
                       {"words": data}, {"prediction": prediction},
                       fluid.default_main_program())
 ```
-代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输出和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
+代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输入和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
 ``` shell
 .
 ├── client_conf
diff --git a/python/examples/bert/benchmark.py b/python/examples/bert/benchmark.py
index e14c02fe1231c1ab04bcf1fda67046ea6b3806bb..af75b718b78b2bc130c2411d05d190fc0d298006 100644
--- a/python/examples/bert/benchmark.py
+++ b/python/examples/bert/benchmark.py
@@ -26,7 +26,7 @@ from batching import pad_batch_data
 import tokenization
 import requests
 import json
-from bert_reader import BertReader
+from paddle_serving_app.reader import ChineseBertReader
 
 args = benchmark_args()
 
@@ -37,7 +37,7 @@ def single_func(idx, resource):
     for line in fin:
         dataset.append(line.strip())
     if args.request == "rpc":
-        reader = BertReader(vocab_file="vocab.txt", max_seq_len=20)
+        reader = ChineseBertReader(vocab_file="vocab.txt", max_seq_len=20)
         fetch = ["pooled_output"]
         client = Client()
         client.load_client_config(args.model)
diff --git a/python/examples/bert/bert_client.py b/python/examples/bert/bert_client.py
index b33a80d88fcc28200a61bc6125afcea0a0352dab..b72d17f142c65bafe8ef13e1a963aacce6b3e821 100644
--- a/python/examples/bert/bert_client.py
+++ b/python/examples/bert/bert_client.py
@@ -25,7 +25,7 @@ from paddlehub.common.logger import logger
 import socket
 from paddle_serving_client import Client
 from paddle_serving_client.utils import benchmark_args
-from paddle_serving_app import ChineseBertReader
+from paddle_serving_app.reader import ChineseBertReader
 
 args = benchmark_args()
 
diff --git a/python/examples/bert/bert_web_service.py b/python/examples/bert/bert_web_service.py
index 6a5830ea179b033f9f761010d8cf9213d9b1e40b..d72150878c51d4f95bbc5d2263ad00fb1ed2c387 100644
--- a/python/examples/bert/bert_web_service.py
+++ b/python/examples/bert/bert_web_service.py
@@ -14,14 +14,14 @@
 # limitations under the License.
 # pylint: disable=doc-string-missing
 from paddle_serving_server_gpu.web_service import WebService
-from bert_reader import BertReader
+from paddle_serving_app.reader import ChineseBertReader
 import sys
 import os
 
 
 class BertService(WebService):
     def load(self):
-        self.reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
+        self.reader = ChineseBertReader(vocab_file="vocab.txt", max_seq_len=128)
 
     def preprocess(self, feed=[], fetch=[]):
         feed_res = [
@@ -37,5 +37,5 @@ gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
 bert_service.set_gpus(gpu_ids)
 bert_service.prepare_server(
     workdir="workdir", port=int(sys.argv[2]), device="gpu")
-bert_service.run_server()
-bert_service.run_flask()
+bert_service.run_rpc_service()
+bert_service.run_web_service()
diff --git a/python/examples/imagenet/resnet50_web_service.py b/python/examples/imagenet/resnet50_web_service.py
index ba40b41bbd9b773910ba0265b3604edd650570ff..3966d31c951d83d8f984e5a265504035ed273125 100644
--- a/python/examples/imagenet/resnet50_web_service.py
+++ b/python/examples/imagenet/resnet50_web_service.py
@@ -68,5 +68,5 @@ if device == "gpu":
     image_service.set_gpus("0,1")
 image_service.prepare_server(
     workdir="workdir", port=int(sys.argv[3]), device=device)
-image_service.run_server()
-image_service.run_flask()
+image_service.run_rpc_service()
+image_service.run_web_service()
diff --git a/python/examples/imdb/benchmark.py b/python/examples/imdb/benchmark.py
index b8d7a70f30c5cf2d0ee985a8c30fada8fa9481b3..632d336ebf20363e257e6e60f08d773cea659a74 100644
--- a/python/examples/imdb/benchmark.py
+++ b/python/examples/imdb/benchmark.py
@@ -16,7 +16,7 @@
 import sys
 import time
 import requests
-from paddle_serving_app import IMDBDataset
+from paddle_serving_app.reader import IMDBDataset
 from paddle_serving_client import Client
 from paddle_serving_client.utils import MultiThreadRunner
 from paddle_serving_client.utils import benchmark_args
diff --git a/python/examples/imdb/test_client.py b/python/examples/imdb/test_client.py
index 74364e5854d223e380cb386f9a8bc68b8517305a..cbdc6fe56e0f1078ad32c0d15f4e30a1a59f581b 100644
--- a/python/examples/imdb/test_client.py
+++ b/python/examples/imdb/test_client.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 # pylint: disable=doc-string-missing
 from paddle_serving_client import Client
-from paddle_serving_app import IMDBDataset
+from paddle_serving_app.reader import IMDBDataset
 import sys
 
 client = Client()
diff --git a/python/examples/imdb/text_classify_service.py b/python/examples/imdb/text_classify_service.py
index ae54b99030ee777ad127242d26c13cdbc05645e9..fe6ab0319deb0de5875781cf0890aa39a45c2415 100755
--- a/python/examples/imdb/text_classify_service.py
+++ b/python/examples/imdb/text_classify_service.py
@@ -14,7 +14,7 @@
 # pylint: disable=doc-string-missing
 
 from paddle_serving_server.web_service import WebService
-from paddle_serving_app import IMDBDataset
+from paddle_serving_app.reader import IMDBDataset
 import sys
 
 
@@ -37,5 +37,5 @@ imdb_service.load_model_config(sys.argv[1])
 imdb_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
 imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
-imdb_service.run_server()
-imdb_service.run_flask()
+imdb_service.run_rpc_service()
+imdb_service.run_web_service()
diff --git a/python/examples/lac/lac_web_service.py b/python/examples/lac/lac_web_service.py
index c9bd00986c62abde3ee24ddddbf08dda45bbed05..62a7148b230029bc781fa550597df25471a7fc8d 100644
--- a/python/examples/lac/lac_web_service.py
+++ b/python/examples/lac/lac_web_service.py
@@ -47,5 +47,5 @@ lac_service.load_model_config(sys.argv[1])
 lac_service.load_reader()
 lac_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
-lac_service.run_server()
-lac_service.run_flask()
+lac_service.run_rpc_service()
+lac_service.run_web_service()
diff --git a/python/examples/resnet_v2_50/resnet50_debug.py b/python/examples/resnet_v2_50/resnet50_debug.py
index 62cb1812c5718ae1f9e10e9e9a57d7c1ae6736b7..768893c20bc3f6bfcb6e21f446d053391825c5fa 100644
--- a/python/examples/resnet_v2_50/resnet50_debug.py
+++ b/python/examples/resnet_v2_50/resnet50_debug.py
@@ -14,7 +14,7 @@
 
 from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop
 from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize
-from paddle_serving_app import Debugger
+from paddle_serving_app.local_predict import Debugger
 import sys
 
 debugger = Debugger()
diff --git a/python/examples/senta/get_data.sh b/python/examples/senta/get_data.sh
index f1fb3844a703503177906a029bd42810e5fa3f33..fcd060f42aa2386e841f122c851394fc472d7f5b 100644
--- a/python/examples/senta/get_data.sh
+++ b/python/examples/senta/get_data.sh
@@ -1,6 +1,6 @@
 wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SentimentAnalysis/senta_bilstm.tar.gz --no-check-certificate
 tar -xzvf senta_bilstm.tar.gz
-wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/lac_model.tar.gz --no-check-certificate
+wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/lac.tar.gz --no-check-certificate
 tar -xzvf lac_model.tar.gz
 wget https://paddle-serving.bj.bcebos.com/reader/lac/lac_dict.tar.gz --no-check-certificate
 tar -xzvf lac_dict.tar.gz
diff --git a/python/examples/senta/senta_web_service.py b/python/examples/senta/senta_web_service.py
index 5d20020c46d3b5ed23914cb9813ac889e232a2b3..0621ece74173596a1820f1b09258ecf5bb727f29 100644
--- a/python/examples/senta/senta_web_service.py
+++ b/python/examples/senta/senta_web_service.py
@@ -14,13 +14,10 @@
 
 from paddle_serving_server_gpu.web_service import WebService
 from paddle_serving_client import Client
-from paddle_serving_app import LACReader, SentaReader
-import numpy as np
+from paddle_serving_app.reader import LACReader, SentaReader
 import os
-import io
 import sys
-import subprocess
-from multiprocessing import Process, Queue
+from multiprocessing import Process
 
 
 class SentaService(WebService):
@@ -33,10 +30,6 @@ class SentaService(WebService):
         self.lac_client_config_path = lac_model_path + "/serving_server_conf.prototxt"
         self.lac_dict_path = lac_dict_path
         self.senta_dict_path = senta_dict_path
-        self.show = False
-
-    def show_detail(self, show=False):
-        self.show = show
 
     def start_lac_service(self):
         if not os.path.exists('./lac_serving'):
@@ -64,34 +57,29 @@ class SentaService(WebService):
         self.lac_client.connect(["127.0.0.1:{}".format(self.lac_port)])
 
     def init_lac_reader(self):
-        self.lac_reader = LACReader(self.lac_dict_path)
+        self.lac_reader = LACReader()
 
     def init_senta_reader(self):
-        self.senta_reader = SentaReader(vocab_path=self.senta_dict_path)
+        self.senta_reader = SentaReader()
 
     def preprocess(self, feed=[], fetch=[]):
-        feed_data = self.lac_reader.process(feed[0]["words"])
-        if self.show:
-            print("---- lac reader ----")
-            print(feed_data)
-        lac_result = self.lac_predict(feed_data)
-        if self.show:
-            print("---- lac out ----")
-            print(lac_result)
-        segs = self.lac_reader.parse_result(feed[0]["words"],
-                                            lac_result["crf_decode"])
-        if self.show:
-            print("---- lac parse ----")
-            print(segs)
-        feed_data = self.senta_reader.process(segs)
-        if self.show:
-            print("---- senta reader ----")
-            print("feed_data", feed_data)
-        return [{"words": feed_data}], fetch
+        feed_data = [{
+            "words": self.lac_reader.process(x["words"])
+        } for x in feed]
+        lac_result = self.lac_client.predict(
+            feed=feed_data, fetch=["crf_decode"])
+        feed_batch = []
+        result_lod = lac_result["crf_decode.lod"]
+        for i in range(len(feed)):
+            segs = self.lac_reader.parse_result(
+                feed[i]["words"],
+                lac_result["crf_decode"][result_lod[i]:result_lod[i + 1]])
+            feed_data = self.senta_reader.process(segs)
+            feed_batch.append({"words": feed_data})
+        return feed_batch, fetch
 
 
 senta_service = SentaService(name="senta")
-#senta_service.show_detail(True)
 senta_service.set_config(
     lac_model_path="./lac_model",
     lac_dict_path="./lac_dict",
@@ -102,5 +90,5 @@ senta_service.prepare_server(
 senta_service.init_lac_reader()
 senta_service.init_senta_reader()
 senta_service.init_lac_service()
-senta_service.run_server()
-senta_service.run_flask()
+senta_service.run_rpc_service()
+senta_service.run_web_service()
diff --git a/python/paddle_serving_app/README.md b/python/paddle_serving_app/README.md
index 1756b83993e67dcbc66b6809631c5e953eef08d7..a0fd35b7f02ce165f878238a757613c62d2fea26 100644
--- a/python/paddle_serving_app/README.md
+++ b/python/paddle_serving_app/README.md
@@ -158,7 +158,7 @@ Therefore, a local prediction tool is built into the paddle_serving_app, which i
 Taking [fit_a_line prediction service](../examples/fit_a_line) as an example, the following code can be used to run local prediction.
 
 ```python
-from paddle_serving_app import Debugger
+from paddle_serving_app.local_predict import Debugger
 import numpy as np
 
 debugger = Debugger()
diff --git a/python/paddle_serving_app/README_CN.md b/python/paddle_serving_app/README_CN.md
index 75dcf9ae78bec0c00b7662f7427d3816feaeca3d..2624c238e2dc212f1d10a251ee742891cae6a08c 100644
--- a/python/paddle_serving_app/README_CN.md
+++ b/python/paddle_serving_app/README_CN.md
@@ -147,7 +147,7 @@ Paddle Serving框架的server预测op使用了Paddle 的预测框架,在部署
 以[fit_a_line预测服务](../examples/fit_a_line)为例,使用以下代码即可执行本地预测。
 
 ```python
-from paddle_serving_app import Debugger
+from paddle_serving_app.local_predict import Debugger
 import numpy as np
 
 debugger = Debugger()
diff --git a/python/paddle_serving_app/__init__.py b/python/paddle_serving_app/__init__.py
index 2a6225570c3de61ba6e0a0587f81175816cd0f8d..11ad09a1d880a8b235e5cf1b99f6be91ec9cccbf 100644
--- a/python/paddle_serving_app/__init__.py
+++ b/python/paddle_serving_app/__init__.py
@@ -11,10 +11,4 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .reader.chinese_bert_reader import ChineseBertReader
-from .reader.image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, CenterCrop, Resize, PadStride
-from .reader.lac_reader import LACReader
-from .reader.senta_reader import SentaReader
-from .reader.imdb_reader import IMDBDataset
 from .models import ServingModels
-from .local_predict import Debugger
diff --git a/python/paddle_serving_app/models/model_list.py b/python/paddle_serving_app/models/model_list.py
index b22bbe8934816e9ced881d352b9e2a54ed3c9234..3d08f2fea95cc07e0cb1b57b005f72b95c6a4bcd 100644
--- a/python/paddle_serving_app/models/model_list.py
+++ b/python/paddle_serving_app/models/model_list.py
@@ -37,7 +37,7 @@ class ServingModels(object):
         object_detection_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/image/ObjectDetection/"
         senta_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SentimentAnalysis/"
         semantic_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticRepresentation/"
-        wordseg_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/ChineseWordSegmentation/"
+        wordseg_url = "https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/"
 
         self.url_dict = {}
 
diff --git a/python/paddle_serving_app/reader/__init__.py b/python/paddle_serving_app/reader/__init__.py
index 9b556a119d47ec693a667cf7c5ab10c0e56ace53..0eee878284e2028657a660acd38a21934bb5ccd7 100644
--- a/python/paddle_serving_app/reader/__init__.py
+++ b/python/paddle_serving_app/reader/__init__.py
@@ -11,4 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from .chinese_bert_reader import ChineseBertReader
 from .image_reader import ImageReader, File2Image, URL2Image, Sequential, Normalize, CenterCrop, Resize, Transpose, Div, RGB2BGR, BGR2RGB, RCNNPostprocess, SegPostprocess, PadStride
+from .lac_reader import LACReader
+from .senta_reader import SentaReader
+from .imdb_reader import IMDBDataset
diff --git a/python/paddle_serving_app/reader/lac_reader.py b/python/paddle_serving_app/reader/lac_reader.py
index 720bbf9c61051dcdc877f0a1f4933718be32263d..7e804ff371e2d90d79f7f663e83a854b1b0c9647 100644
--- a/python/paddle_serving_app/reader/lac_reader.py
+++ b/python/paddle_serving_app/reader/lac_reader.py
@@ -48,10 +48,16 @@ def load_kv_dict(dict_path,
 class LACReader(object):
     """data reader"""
 
-    def __init__(self, dict_folder):
+    def __init__(self, dict_folder=""):
         # read dict
         #basepath = os.path.abspath(__file__)
         #folder = os.path.dirname(basepath)
+        if dict_folder == "":
+            dict_folder = "lac_dict"
+        if not os.path.exists(dict_folder):
+            r = os.system(
+                "wget https://paddle-serving.bj.bcebos.com/reader/lac/lac_dict.tar.gz --no-check-certificate && tar -xzvf lac_dict.tar.gz"
+            )
         word_dict_path = os.path.join(dict_folder, "word.dic")
         label_dict_path = os.path.join(dict_folder, "tag.dic")
         replace_dict_path = os.path.join(dict_folder, "q2b.dic")
diff --git a/python/paddle_serving_app/reader/senta_reader.py b/python/paddle_serving_app/reader/senta_reader.py
index 6e608b822fbb66f11288ea0080c8e264d8e5c34a..e0c93c00d1a6acb0c3d30294d40fb63b4929a639 100644
--- a/python/paddle_serving_app/reader/senta_reader.py
+++ b/python/paddle_serving_app/reader/senta_reader.py
@@ -14,10 +14,11 @@
 
 import sys
 import io
+import os
 
 
 class SentaReader():
-    def __init__(self, vocab_path, max_seq_len=20):
+    def __init__(self, vocab_path="", max_seq_len=20):
         self.max_seq_len = max_seq_len
         self.word_dict = self.load_vocab(vocab_path)
 
@@ -25,6 +26,13 @@
         """
         load the given vocabulary
         """
+        if vocab_path == "":
+            vocab_path = "senta_vocab.txt"
+        if not os.path.exists(vocab_path):
+            r = os.system(
+                " wget https://paddle-serving.bj.bcebos.com/reader/senta/senta_vocab.txt --no-check-certificate"
+            )
+
         vocab = {}
         with io.open(vocab_path, 'r', encoding='utf8') as f:
             for line in f:
diff --git a/python/paddle_serving_app/version.py b/python/paddle_serving_app/version.py
index 766bf4e397e46153193b1e3cac6fed5323241c45..c91808f95e7a5b62729eb630a3203ad42f7a5889 100644
--- a/python/paddle_serving_app/version.py
+++ b/python/paddle_serving_app/version.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving App version string """
-serving_app_version = "0.0.3"
+serving_app_version = "0.1.0"
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 8c189d415b5718788da2ff0e6757ba3af259e750..e3302c14239c8bfc37a6bafb39b112cfed5230fd 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -207,8 +207,9 @@ class Client(object):
                     key))
             if type(feed[key]).__module__ == np.__name__ and np.size(feed[
                     key]) != self.feed_tensor_len[key]:
-                raise SystemExit("The shape of feed tensor {} not match.".format(
-                    key))
+                #raise SystemExit("The shape of feed tensor {} not match.".format(
+                #    key))
+                pass
 
     def predict(self, feed=None, fetch=None, need_variant_tag=False):
         self.profile_.record('py_prepro_0')
diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py
index 034c85217cec70b73be499aaa342739ab5153c0a..20d29e2bdfe0d2753d2f23cda028d76a3b13c699 100644
--- a/python/paddle_serving_client/io/__init__.py
+++ b/python/paddle_serving_client/io/__init__.py
@@ -33,7 +33,6 @@ def save_model(server_model_folder,
     executor = Executor(place=CPUPlace())
 
     feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]
-    #target_vars = list(fetch_var_dict.values())
     target_vars = []
     target_var_names = []
     for key in sorted(fetch_var_dict.keys()):
diff --git a/python/paddle_serving_client/version.py b/python/paddle_serving_client/version.py
index 4870767dfcb95f9502dfa5880a85b1c11c62964f..5a1f35c598f044e80cff12ce661ff80a61647543 100644
--- a/python/paddle_serving_client/version.py
+++ b/python/paddle_serving_client/version.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.2.2"
-serving_server_version = "0.2.2"
-module_proto_version = "0.2.2"
+serving_client_version = "0.3.0"
+serving_server_version = "0.3.0"
+module_proto_version = "0.3.0"
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 70aafbf5c3da4d1a2a8ec50ce5a2258383863057..894b0c5b132845cbde589982e1fb471f028e820b 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -103,7 +103,7 @@ if __name__ == "__main__":
         service.load_model_config(args.model)
         service.prepare_server(
             workdir=args.workdir, port=args.port, device=args.device)
-        service.run_server()
+        service.run_rpc_service()
 
         app_instance = Flask(__name__)
 
diff --git a/python/paddle_serving_server/version.py b/python/paddle_serving_server/version.py
index 4870767dfcb95f9502dfa5880a85b1c11c62964f..5a1f35c598f044e80cff12ce661ff80a61647543 100644
--- a/python/paddle_serving_server/version.py
+++ b/python/paddle_serving_server/version.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.2.2"
-serving_server_version = "0.2.2"
-module_proto_version = "0.2.2"
+serving_client_version = "0.3.0"
+serving_server_version = "0.3.0"
+module_proto_version = "0.3.0"
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index f8c43707660e08e1bc44fdd62e40e20523f6cb6d..7f37b10be05e84e29cf6cda3cd3cc3d939910027 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -92,7 +92,7 @@ class WebService(object):
             result = {"result": "Request Value Error"}
         return result
 
-    def run_server(self):
+    def run_rpc_service(self):
         import socket
         localIP = socket.gethostbyname(socket.gethostname())
         print("web service address:")
@@ -115,7 +115,7 @@ class WebService(object):
 
         self.app_instance = app_instance
 
-    def run_flask(self):
+    def run_web_service(self):
         self.app_instance.run(host="0.0.0.0",
                               port=self.port,
                               threaded=False,
diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py
index 297ff25d2084bead186fa4b9037e5de8282df0fe..309896a876bda5fc9b1baceb089242baa6d77dc5 100644
--- a/python/paddle_serving_server_gpu/serve.py
+++ b/python/paddle_serving_server_gpu/serve.py
@@ -118,7 +118,7 @@ if __name__ == "__main__":
             web_service.set_gpus(gpu_ids)
         web_service.prepare_server(
             workdir=args.workdir, port=args.port, device=args.device)
-        web_service.run_server()
+        web_service.run_rpc_service()
 
         app_instance = Flask(__name__)
 
diff --git a/python/paddle_serving_server_gpu/version.py b/python/paddle_serving_server_gpu/version.py
index 4870767dfcb95f9502dfa5880a85b1c11c62964f..5a1f35c598f044e80cff12ce661ff80a61647543 100644
--- a/python/paddle_serving_server_gpu/version.py
+++ b/python/paddle_serving_server_gpu/version.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.2.2"
-serving_server_version = "0.2.2"
-module_proto_version = "0.2.2"
+serving_client_version = "0.3.0"
+serving_server_version = "0.3.0"
+module_proto_version = "0.3.0"
diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py
index e64e73197d02a80e43bbc77a7589ab43efe2f244..2328453268f6cefa9c5bddb818677cc3962ea7ea 100644
--- a/python/paddle_serving_server_gpu/web_service.py
+++ b/python/paddle_serving_server_gpu/web_service.py
@@ -133,12 +133,11 @@ class WebService(object):
             result = self.postprocess(
                 feed=feed, fetch=fetch, fetch_map=fetch_map)
             result = {"result": result}
-            result = {"result": fetch_map}
         except ValueError:
             result = {"result": "Request Value Error"}
         return result
 
-    def run_server(self):
+    def run_rpc_service(self):
         import socket
         localIP = socket.gethostbyname(socket.gethostname())
         print("web service address:")
@@ -165,7 +164,7 @@ class WebService(object):
 
         self.app_instance = app_instance
 
-    def run_flask(self):
+    def run_web_service(self):
         self.app_instance.run(host="0.0.0.0",
                               port=self.port,
                               threaded=False,