diff --git a/serving/README.md b/serving/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..37d688f548a6742b11e4fbe6ab85af1daaccde66
--- /dev/null
+++ b/serving/README.md
@@ -0,0 +1 @@
+# PLSC Serving
diff --git a/serving/client/face_service/data/00000000.jpg b/serving/client/face_service/data/00000000.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cc2c8ec8f59bade64ff622c700d8b3dbc5249c2c
Binary files /dev/null and b/serving/client/face_service/data/00000000.jpg differ
diff --git a/serving/client/face_service/face_service.py b/serving/client/face_service/face_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c43ad260ae56d8cfbcdebdfb0c2de99aa43b97
--- /dev/null
+++ b/serving/client/face_service/face_service.py
@@ -0,0 +1,94 @@
+# coding:utf-8
+import sys
+from base64 import b64encode
+import ujson
+
+_ver = sys.version_info
+is_py2 = (_ver[0] == 2)
+is_py3 = (_ver[0] == 3)
+
+if is_py2:
+    import httplib
+if is_py3:
+    import http.client as httplib
+
+
+class FaceService(object):
+    def __init__(self):
+        self.batch_size = 16
+        self.con_list = []
+        self.con_index = 0
+        self.server_list = []
+
+    def connect(self, server='127.0.0.1:8010'):
+        self.server_list.append(server)
+        con = httplib.HTTPConnection(server)
+        self.con_list.append(con)
+
+    def connect_all_server(self, server_list):
+        for server in server_list:
+            self.server_list.append(server)
+            self.con_list.append(httplib.HTTPConnection(server))
+
+    def infer(self, request_msg):
+        # Send the request on the current connection; on failure drop that
+        # connection and tell the caller to retry with the next server.
+        try:
+            cur_con = self.con_list[self.con_index]
+            cur_con.request('POST', "/FaceClassifyService/inference",
+                            request_msg, {"Content-Type": "application/json"})
+            response = cur_con.getresponse()
+            response_msg = response.read()
+            response_msg = ujson.loads(response_msg)
+            self.con_index += 1
+            self.con_index = self.con_index % len(self.con_list)
+            return response_msg
+
+        except BaseException as err:
+            del self.con_list[self.con_index]
+            print(err)
+            if len(self.con_list) == 0:
+                print('All server failed')
+                return 'fail'
+            else:
+                self.con_index = 0
+                return 'retry'
+
+    def encode(self, images):
+        self.batch_size = len(images)
+        request = []
+        for si in range(self.batch_size):
+            request.append(b64encode(images[si]).decode('ascii'))
+
+        request = {"base64_string": request}
+        request_msg = ujson.dumps(request)
+
+        response_msg = self.infer(request_msg)
+        # Retry on the remaining servers until one answers or all have failed.
+        while response_msg == 'retry':
+            response_msg = self.infer(request_msg)
+        if response_msg == 'fail':
+            raise RuntimeError('All servers failed')
+
+        result = []
+        for msg in response_msg["instance"]:
+            result.append(msg["embedding"])
+        return result
+
+    def close(self):
+        for con in self.con_list:
+            con.close()
+
+
+def test():
+    with open('./data/00000000.jpg', 'rb') as f:
+        image = f.read()
+    bc = FaceService()
+    bc.connect('127.0.0.1:8866')
+    result = bc.encode([image])
+    print(result[0])
+    bc.close()
+
+
+if __name__ == '__main__':
+    test()
diff --git a/serving/server/LICENSE b/serving/server/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..96f1555dfe572f6dd2af1b7db9e100cd85bf9687
--- /dev/null
+++ b/serving/server/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2018 The Python Packaging Authority
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/serving/server/README.md b/serving/server/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6e0fcfb019a0fda74192c075da33d408299ca76
--- /dev/null
+++ b/serving/server/README.md
@@ -0,0 +1 @@
+Server-side package for PLSC Serving: it downloads the serving binary and a face-recognition model, then starts the inference service.
diff --git a/serving/server/plsc_serving/__init__.py b/serving/server/plsc_serving/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7b8b2e4a4501833445c0006a9048fe896ec91c5
--- /dev/null
+++ b/serving/server/plsc_serving/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = '0.1.4'
diff --git a/serving/server/plsc_serving/run/__init__.py b/serving/server/plsc_serving/run/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..54a2bdea807010c56f77fc52bb327c20dc3f2a3d
--- /dev/null
+++ b/serving/server/plsc_serving/run/__init__.py
@@ -0,0 +1,194 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import tarfile
+import subprocess
+import time
+
+import plsc_serving
+
+
+class PLSCServer(object):
+    def __init__(self, with_gpu=True):
+        os.chdir(self.get_path())
+        self.with_gpu_flag = with_gpu
+        self.p_list = []
+        self.use_other_model = False
+        self.run_m = False
+        self.model_url = 'https://paddle-serving.bj.bcebos.com/paddle-gpu-serving/model-face'
+        self.bin_url = 'https://paddle-serving.bj.bcebos.com/paddle-gpu-serving/bin-face'
+        self.cpu_run_cmd = './bin/serving-cpu --bthread_min_concurrency=4 --bthread_concurrency=4 --logtostderr=true '
+        self.gpu_run_cmd = './bin/serving-gpu --bthread_min_concurrency=4 --bthread_concurrency=4 --logtostderr=true '
+        self.model_path_str = ''
+        self.get_exe()
+
+    def help(self):
+        print('Usage: call with_model(model_name), then run(gpu_index, port) '
+              'or run_multi(gpu_index_list, port_list).')
+
+    def get_exe(self):
+        # Download the serving binary that matches the installed package
+        # version unless a matching binary is already present under ./bin.
+        exe_path = './bin'
+        module_version = plsc_serving.__version__
+        target_version_list = module_version.strip().split('.')
+        target_version = target_version_list[0] + '.' + target_version_list[1]
+        need_download = False
+
+        if os.path.exists(exe_path):
+            with open('./bin/serving-version.txt') as f:
+                serving_version = f.read().strip()
+            if serving_version != target_version:
+                need_download = True
+        else:
+            need_download = True
+        if need_download:
+            tar_name = 'face-serving-' + target_version + '-bin.tar.gz'
+            bin_url = self.bin_url + '/' + tar_name
+            print('First-time run, downloading PaddleServing components ...')
+            os.system('wget ' + bin_url + ' --no-check-certificate')
+            print('Decompressing files ...')
+            tar = tarfile.open(tar_name)
+            tar.extractall()
+            tar.close()
+            os.remove(tar_name)
+
+    def modify_conf(self, gpu_index=0):
+        # Rewrite the engine name, model path and device type in the
+        # model_toolkit configuration before launching the serving binary.
+        os.chdir(self.get_path())
+        engine_name = 'name: "face_resnet50"'
+        if not self.with_gpu_flag:
+            conf_file = './conf/model_toolkit.prototxt'
+            with open(conf_file, 'r') as f:
+                conf_str = f.read()
+            conf_str = re.sub('GPU', 'CPU', conf_str)
+            conf_str = re.sub('name.*"', engine_name, conf_str)
+            conf_str = re.sub('model_data_path.*"', self.model_path_str,
+                              conf_str)
+            conf_str = re.sub('enable_memory_optimization: 0',
+                              'enable_memory_optimization: 1', conf_str)
+            with open(conf_file, 'w') as f:
+                f.write(conf_str)
+        else:
+            conf_file = './conf/model_toolkit.prototxt.' + str(gpu_index)
+            with open(conf_file, 'r') as f:
+                conf_str = f.read()
+            conf_str = re.sub('CPU', 'GPU', conf_str)
+            conf_str = re.sub('name.*"', engine_name, conf_str)
+            conf_str = re.sub('model_data_path.*"', self.model_path_str,
+                              conf_str)
+            conf_str = re.sub('enable_memory_optimization: 0',
+                              'enable_memory_optimization: 1', conf_str)
+            with open(conf_file, 'w') as f:
+                f.write(conf_str)
+
+    def hold(self):
+        try:
+            while True:
+                time.sleep(60)
+        except KeyboardInterrupt:
+            print("Server is going to quit")
+            time.sleep(5)
+
+    def run(self, gpu_index=0, port=8866):
+        os.chdir(self.get_path())
+        self.modify_conf(gpu_index)
+
+        if self.with_gpu_flag:
+            gpu_msg = '--gpuid=' + str(gpu_index) + ' '
+            run_cmd = self.gpu_run_cmd + gpu_msg
+            run_cmd += '--port=' + str(port) + ' '
+            run_cmd += '--resource_file=resource.prototxt.' + str(gpu_index) + ' '
+            print('Start serving on gpu ' + str(gpu_index) + ' port = ' +
+                  str(port))
+        else:
+            # Use the GPU binary when CUDA is installed, otherwise fall back
+            # to the CPU binary.
+            check = subprocess.Popen(
+                'cat /usr/local/cuda/version.txt > tmp 2>&1', shell=True)
+            check.wait()
+            if check.returncode == 0:
+                run_cmd = self.gpu_run_cmd + '--port=' + str(port) + ' '
+            else:
+                run_cmd = self.cpu_run_cmd + '--port=' + str(port) + ' '
+            print('Start serving on cpu port = {}'.format(port))
+
+        process = subprocess.Popen(run_cmd, shell=True)
+        self.p_list.append(process)
+        if not self.run_m:
+            self.hold()
+
+    def run_multi(self, gpu_index_list=[], port_list=[]):
+        self.run_m = True
+        if len(port_list) < 1:
+            print('Please set one port at least.')
+            return -1
+        if self.with_gpu_flag:
+            if len(gpu_index_list) != len(port_list):
+                print('Expect same length of gpu_index_list and port_list.')
+                return -1
+            for gpu_index, port in zip(gpu_index_list, port_list):
+                self.run(gpu_index=gpu_index, port=port)
+        else:
+            for port in port_list:
+                self.run(port=port)
+        self.hold()
+
+    def stop(self):
+        for p in self.p_list:
+            p.kill()
+
+    def show_conf(self):
+        with open('./conf/model_toolkit.prototxt', 'r') as f:
+            conf_str = f.read()
+        print(conf_str)
+
+    def with_model(self, model_name=None, model_url=None):
+        if model_url is not None:
+            self.model_url = model_url
+            self.use_other_model = True
+        if model_name is None or not isinstance(model_name, str):
+            print('Please pass model_name as a string')
+            return -1
+        os.chdir(self.get_path())
+        self.get_model(model_name)
+
+    def get_path(self):
+        py_path = os.path.dirname(plsc_serving.__file__)
+        server_path = os.path.join(py_path, 'server')
+        return server_path
+
+    def get_model(self, model_name):
+        # Download and unpack the requested model into the fluid model
+        # directory if it is not already there.
+        server_path = self.get_path()
+        tar_name = model_name + '.tar.gz'
+        model_url = self.model_url + '/' + tar_name
+
+        model_path = os.path.join(server_path, 'data/model/paddle/fluid')
+        if not os.path.exists(model_path):
+            os.makedirs(model_path)
+        os.chdir(model_path)
+        if not os.path.exists(model_name):
+            os.system('wget ' + model_url + ' --no-check-certificate')
+            print('Decompressing files ...')
+            tar = tarfile.open(tar_name)
+            tar.extractall()
+            tar.close()
+            os.remove(tar_name)
+
+        self.model_path_str = r'model_data_path: "./data/model/paddle/fluid/' + model_name + r'"'
+
+        os.chdir(self.get_path())
diff --git a/serving/server/plsc_serving/server/conf/cube.conf b/serving/server/plsc_serving/server/conf/cube.conf
new file mode 100644
index 0000000000000000000000000000000000000000..35310301f606a2502ea5a233f480e7656fa7583e
--- /dev/null
+++ b/serving/server/plsc_serving/server/conf/cube.conf
@@ -0,0 +1,15 @@
+[{
+    "dict_name": "dict",
+    "shard": 2,
+    "dup": 1,
+    "timeout": 200,
+    "retry": 3,
+    "backup_request": 100,
+    "type": "ipport_list",
+    "load_balancer": "rr",
+    "nodes": [{
+        "ipport_list": "list://xxx.xxx.xxx.xxx:8000"
+    },{
+        "ipport_list": "list://xxx.xxx.xxx.xxx:8000"
+    }]
+}]
diff --git a/serving/server/plsc_serving/server/conf/gflags.conf b/serving/server/plsc_serving/server/conf/gflags.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f9c735b5e272f528eeaa409762c9c25bba09dcf3
--- /dev/null
+++ b/serving/server/plsc_serving/server/conf/gflags.conf
@@ -0,0 +1,2 @@
+--enable_model_toolkit
+--enable_cube=false
diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..a3ff5b5c87f2da2eab47e6f205ffaf3115335d64
--- /dev/null
+++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt @@ -0,0 +1,11 @@ +engines { + name: "face_classify_model" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.0 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.0 new file mode 100644 index 0000000000000000000000000000000000000000..7797344772fef223743c849c84cfab4985f5e900 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.0 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 0 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.1 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.1 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.1 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.10 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.10 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.10 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.11 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.11 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.11 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.12 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.12 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ 
b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.12 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.13 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.13 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.13 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.14 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.14 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.14 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.15 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.15 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.15 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.2 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.2 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.2 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.3 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.3 new file mode 100644 index 0000000000000000000000000000000000000000..7797344772fef223743c849c84cfab4985f5e900 --- /dev/null +++ 
b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.3 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 0 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.4 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.4 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.4 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.5 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.5 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.5 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.6 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.6 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.6 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.7 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.7 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.7 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.8 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.8 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ 
b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.8 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.9 b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.9 new file mode 100644 index 0000000000000000000000000000000000000000..a5352b790dc2b4e3e8628d22da2196948974336a --- /dev/null +++ b/serving/server/plsc_serving/server/conf/model_toolkit.prototxt.9 @@ -0,0 +1,11 @@ +engines { + name: "bert" + type: "FLUID_GPU_ANALYSIS_DIR" + reloadable_meta: "./data/model/paddle/fluid_time_file" + reloadable_type: "timestamp_ne" + model_data_path: "/home/xulongteng/.paddlehub/bert_service/bert_chinese_L-12_H-768_A-12" + runtime_thread_num: 0 + batch_infer_size: 0 + enable_batch_align: 0 + enable_memory_optimization: 1 +} diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt b/serving/server/plsc_serving/server/conf/resource.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..0a0d6678fe99a89517ba9855140c0c01f919742e --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.0 b/serving/server/plsc_serving/server/conf/resource.prototxt.0 new file mode 100644 index 0000000000000000000000000000000000000000..c202367753ae63f95918ade7d8c10098f974f189 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.0 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.0" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.1 b/serving/server/plsc_serving/server/conf/resource.prototxt.1 new file mode 100644 index 0000000000000000000000000000000000000000..d7c15d4db80938c424dd8749a217332b4f2c83a2 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.1 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.1" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.10 b/serving/server/plsc_serving/server/conf/resource.prototxt.10 new file mode 100644 index 0000000000000000000000000000000000000000..74b0033a4a83985656de5f43af05d10faa8f2e33 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.10 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.10" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.11 b/serving/server/plsc_serving/server/conf/resource.prototxt.11 new file mode 100644 index 0000000000000000000000000000000000000000..91bd7417debd0c95c65de4bbe39d55d003a33356 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.11 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.11" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.12 
b/serving/server/plsc_serving/server/conf/resource.prototxt.12 new file mode 100644 index 0000000000000000000000000000000000000000..483b713d787f75b5699e6a18281fb958c50d6348 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.12 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.12" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.13 b/serving/server/plsc_serving/server/conf/resource.prototxt.13 new file mode 100644 index 0000000000000000000000000000000000000000..0a3d25a045d16e5cd005215a5f62961de515bcc7 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.13 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.13" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.14 b/serving/server/plsc_serving/server/conf/resource.prototxt.14 new file mode 100644 index 0000000000000000000000000000000000000000..64cb4cffe879d2ecd33ecf943964b57ba86b6a98 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.14 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.14" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.15 b/serving/server/plsc_serving/server/conf/resource.prototxt.15 new file mode 100644 index 0000000000000000000000000000000000000000..462193790116d1bb39e56e3179d1e9236c2fc9ce --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.15 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.15" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.2 b/serving/server/plsc_serving/server/conf/resource.prototxt.2 new file mode 100644 index 0000000000000000000000000000000000000000..45509468a088ec3c3375b2a245a36ab42d83403b --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.2 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.2" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.3 b/serving/server/plsc_serving/server/conf/resource.prototxt.3 new file mode 100644 index 0000000000000000000000000000000000000000..3aadbe34c60c8a3b36199f166847b307b1831c1d --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.3 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.3" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.4 b/serving/server/plsc_serving/server/conf/resource.prototxt.4 new file mode 100644 index 0000000000000000000000000000000000000000..eec742fc0e4dae052ff0d656e105d9a238d02cf9 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.4 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.4" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.5 b/serving/server/plsc_serving/server/conf/resource.prototxt.5 new file mode 100644 index 0000000000000000000000000000000000000000..5db39ee97ad93a47e9713d1cfdd91b0f5bf49b3c --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.5 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: 
"model_toolkit.prototxt.5" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.6 b/serving/server/plsc_serving/server/conf/resource.prototxt.6 new file mode 100644 index 0000000000000000000000000000000000000000..ae65b2516959782282d6fc1247edbf743ba84235 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.6 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.6" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.7 b/serving/server/plsc_serving/server/conf/resource.prototxt.7 new file mode 100644 index 0000000000000000000000000000000000000000..592f0f15cf3096be8bc7abb0207831ded5c64dda --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.7 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.7" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.8 b/serving/server/plsc_serving/server/conf/resource.prototxt.8 new file mode 100644 index 0000000000000000000000000000000000000000..ccd8497170dcc679d7343a2b95e4fb005b950bd7 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.8 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.8" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/resource.prototxt.9 b/serving/server/plsc_serving/server/conf/resource.prototxt.9 new file mode 100644 index 0000000000000000000000000000000000000000..5e1238110ae83e6a5d99ba1037e4e8112bf126b8 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/resource.prototxt.9 @@ -0,0 +1,3 @@ +model_toolkit_path: "./conf/" +model_toolkit_file: "model_toolkit.prototxt.9" +cube_config_file: "./conf/cube.conf" diff --git a/serving/server/plsc_serving/server/conf/service.prototxt b/serving/server/plsc_serving/server/conf/service.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..7774d40f8f18a5212b4ad26cf98551bddd53c9ce --- /dev/null +++ b/serving/server/plsc_serving/server/conf/service.prototxt @@ -0,0 +1,4 @@ +services { + name: "FaceClassifyService" + workflows: "workflow10" +} diff --git a/serving/server/plsc_serving/server/conf/workflow.prototxt b/serving/server/plsc_serving/server/conf/workflow.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..56daa99284a07d490a3782e3d3849f90ae71bee1 --- /dev/null +++ b/serving/server/plsc_serving/server/conf/workflow.prototxt @@ -0,0 +1,8 @@ +workflows { + name: "workflow10" + workflow_type: "Sequence" + nodes { + name: "face_classify_op" + type: "FaceClassifyOp" + } +} diff --git a/serving/server/plsc_serving/server/data/model/paddle/fluid_reload_flag b/serving/server/plsc_serving/server/data/model/paddle/fluid_reload_flag new file mode 100644 index 0000000000000000000000000000000000000000..a1866984b04a26a09bbfc4ef4f08dec5e38f818d --- /dev/null +++ b/serving/server/plsc_serving/server/data/model/paddle/fluid_reload_flag @@ -0,0 +1,2 @@ +paddle fluid model +time:20180531 diff --git a/serving/server/plsc_serving/server/data/model/paddle/fluid_time_file b/serving/server/plsc_serving/server/data/model/paddle/fluid_time_file new file mode 100644 index 0000000000000000000000000000000000000000..4d9422cd37f6c51800055f1b1dc625e2c455fc6d --- /dev/null +++ b/serving/server/plsc_serving/server/data/model/paddle/fluid_time_file @@ -0,0 +1,2 @@ 
+201805311000 +model paddle fluid diff --git a/serving/server/setup.py b/serving/server/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..8b84f844aa7787a0526a82662faadc4ce4a58a2c --- /dev/null +++ b/serving/server/setup.py @@ -0,0 +1,50 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +#read info +info_py = './plsc_serving/__init__.py' +info_content = open(info_py, 'r').readlines() +version_line = [ + l.strip() for l in info_content if l.startswith('__version__') +][0] +exec(version_line) # produce __version__ + +setuptools.setup( + name="plsc-serving", + version=__version__, + author="MRXLT", + author_email="xlt2024@gmail.com", + description="package for plsc serving", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/PaddlePaddle/PLSC", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires='>=3.5', + package_data={ + 'plsc_serving': [ + 'server/conf/*', + 'server/data/model/paddle/fluid_reload_flag', + 'server/data/model/paddle/fluid_time_file', + ] + })
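
Taken together, the files in this diff form a simple workflow: install the `plsc-serving` package built by `setup.py`, start a `PLSCServer` that downloads the serving binary and a face-recognition model and rewrites the `conf/model_toolkit.prototxt*` files, then query it over HTTP with the `FaceService` client. The sketch below is a minimal, hypothetical usage example built only from the APIs added here, not an official recipe: the model name `face_resnet50` is assumed from the engine name used in `modify_conf` (the published model names are not listed in this diff), the package is assumed to be installed (for example with `pip install .` from `serving/server/`), and the client snippet is assumed to run from `serving/client/face_service/` so that `face_service.py` and the sample image are importable.

```python
# --- server process ---
# Downloads ./bin/serving-* and the model on the first run, rewrites the
# model_toolkit configuration, then blocks while serving on the given port.
from plsc_serving.run import PLSCServer

server = PLSCServer(with_gpu=True)
server.with_model(model_name='face_resnet50')  # model name is an assumption
server.run(gpu_index=0, port=8866)             # blocks; Ctrl-C to stop

# --- client process (run separately, from serving/client/face_service/) ---
from face_service import FaceService

client = FaceService()
client.connect('127.0.0.1:8866')
with open('./data/00000000.jpg', 'rb') as f:
    embedding = client.encode([f.read()])[0]   # one embedding per input image
print(len(embedding))
client.close()
```

For multiple GPUs, `run_multi(gpu_index_list=[0, 1], port_list=[8866, 8867])` starts one serving process per port, and the client's `connect_all_server` can round-robin requests across those endpoints.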