diff --git a/Dockerfile b/Dockerfile
index 404902334e241d9a53851d514b928468e8a69369..359dd52c0726e9a421138bf3ecf4d6cff3b2036f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,5 @@
 FROM centos:centos6.10
-RUN export http_proxy=http://172.19.56.199:3128 \
-    && export https_proxy=http://172.19.56.199:3128 \
-    && yum -y install wget \
+RUN yum -y install wget \
     && wget http://people.centos.org/tru/devtools-2/devtools-2.repo -O /etc/yum.repos.d/devtoolset-2.repo \
     && yum -y install devtoolset-2-gcc devtoolset-2-gcc-c++ devtoolset-2-binutils \
     && source /opt/rh/devtoolset-2/enable \
diff --git a/python/examples/imdb/test_server.py b/python/examples/imdb/test_server.py
index e77c5fb6a4842bbc455193c67d8494ee6231c90f..7498faabccd2464568468f5ad10538faf481e671 100644
--- a/python/examples/imdb/test_server.py
+++ b/python/examples/imdb/test_server.py
@@ -14,6 +14,8 @@ op_seq_maker.add_op(general_infer_op)
 
 server = Server()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
+server.set_num_threads(12)
 server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
+port = int(sys.argv[2])
+server.prepare_server(workdir="work_dir1", port=port, device="cpu")
 server.run_server()
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index f400336986780822cffa48927387658c7c13148c..73f67cb82b4817ab3f9fc12269c0cc61ee8eab05 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -104,7 +104,7 @@ class Client(object):
         predictor_sdk = SDKConfig()
         predictor_sdk.set_server_endpoints(endpoints)
         sdk_desc = predictor_sdk.gen_desc()
-        self.client_handle_.create_predictor_by_desc(sdk_desc)
+        self.client_handle_.create_predictor_by_desc(sdk_desc.SerializeToString())
 
     def get_feed_names(self):
         return self.feed_names_
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index d8b58bbe3cf0ad94ae3bf1c62cf48886218bcc1b..a79980e4a2065d9574309241f1ec16fcafa6f468 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -176,7 +176,7 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
-        command = "/home/users/dongdaxiang/github_develop/Serving/build_server/core/general-server/serving" \
+        command = "/home/xulongteng/github/Serving/build_server/core/general-server/serving" \
                   " -enable_model_toolkit " \
                   "-inferservice_path {} " \
                   "-inferservice_file {} " \
@@ -187,7 +187,8 @@
                   "-resource_path {} " \
                   "-resource_file {} " \
                   "-workflow_path {} " \
-                  "-workflow_file {} ".format(
+                  "-workflow_file {} " \
+                  "-bthread_concurrency {} ".format(
                       self.workdir,
                       self.infer_service_fn,
                       self.max_concurrency,
@@ -196,8 +197,9 @@
                       self.reload_interval_s,
                       self.workdir,
                       self.resource_fn,
-                      self.workdir,
-                      self.workflow_fn)
+                      self.workdir,
+                      self.workflow_fn,
+                      self.num_threads,)
         os.system(command)