From 945a035ecd925ba3cacb8dab10943136e04f8a30 Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Wed, 12 Feb 2020 11:31:53 +0800
Subject: [PATCH] fix python api

---
 Dockerfile                               |  4 +---
 python/examples/imdb/test_server.py      |  4 +++-
 python/paddle_serving_client/__init__.py |  2 +-
 python/paddle_serving_server/__init__.py | 10 ++++++----
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 40490233..359dd52c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,5 @@
 FROM centos:centos6.10
-RUN export http_proxy=http://172.19.56.199:3128 \
-    && export https_proxy=http://172.19.56.199:3128 \
-    && yum -y install wget \
+RUN yum -y install wget \
     && wget http://people.centos.org/tru/devtools-2/devtools-2.repo -O /etc/yum.repos.d/devtoolset-2.repo \
     && yum -y install devtoolset-2-gcc devtoolset-2-gcc-c++ devtoolset-2-binutils \
     && source /opt/rh/devtoolset-2/enable \
diff --git a/python/examples/imdb/test_server.py b/python/examples/imdb/test_server.py
index e77c5fb6..7498faab 100644
--- a/python/examples/imdb/test_server.py
+++ b/python/examples/imdb/test_server.py
@@ -14,6 +14,8 @@ op_seq_maker.add_op(general_infer_op)
 
 server = Server()
 server.set_op_sequence(op_seq_maker.get_op_sequence())
+server.set_num_threads(12)
 server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
+port = int(sys.argv[2])
+server.prepare_server(workdir="work_dir1", port=port, device="cpu")
 server.run_server()
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index f4003369..73f67cb8 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -104,7 +104,7 @@ class Client(object):
         predictor_sdk = SDKConfig()
         predictor_sdk.set_server_endpoints(endpoints)
         sdk_desc = predictor_sdk.gen_desc()
-        self.client_handle_.create_predictor_by_desc(sdk_desc)
+        self.client_handle_.create_predictor_by_desc(sdk_desc.SerializeToString())
 
     def get_feed_names(self):
         return self.feed_names_
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index d8b58bbe..a79980e4 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -176,7 +176,7 @@ class Server(object):
     def run_server(self):
         # just run server with system command
         # currently we do not load cube
-        command = "/home/users/dongdaxiang/github_develop/Serving/build_server/core/general-server/serving" \
+        command = "/home/xulongteng/github/Serving/build_server/core/general-server/serving" \
                   " -enable_model_toolkit " \
                   "-inferservice_path {} " \
                   "-inferservice_file {} " \
@@ -187,7 +187,8 @@
                   "-resource_path {} " \
                   "-resource_file {} " \
                   "-workflow_path {} " \
-                  "-workflow_file {} ".format(
+                  "-workflow_file {} " \
+                  "-bthread_concurrency {} ".format(
                       self.workdir,
                       self.infer_service_fn,
                       self.max_concurrency,
@@ -196,8 +197,9 @@
                       self.reload_interval_s,
                       self.workdir,
                       self.resource_fn,
-                      self.workdir,
-                      self.workflow_fn)
+                      self.workdir,
+                      self.workflow_fn,
+                      self.num_threads,)
         os.system(command)
-- 
GitLab
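
Usage sketch (not part of the patch): the diff only shows part of test_server.py, so the OpMaker setup and the op names 'general_reader' and 'general_infer' below are assumptions; the Server calls themselves mirror the hunks above. After this change the example takes the model config directory as argv[1] and the port as argv[2], and the thread count set via set_num_threads is forwarded to the serving binary as -bthread_concurrency.

    # Hypothetical driver mirroring the patched imdb example; op names are assumed.
    import sys
    from paddle_serving_server import OpMaker, OpSeqMaker, Server

    op_maker = OpMaker()
    read_op = op_maker.create('general_reader')           # assumed op name
    general_infer_op = op_maker.create('general_infer')   # assumed op name

    op_seq_maker = OpSeqMaker()
    op_seq_maker.add_op(read_op)
    op_seq_maker.add_op(general_infer_op)

    server = Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(12)             # new: passed on as -bthread_concurrency
    server.load_model_config(sys.argv[1])  # model config dir from the CLI
    port = int(sys.argv[2])                # port no longer hard-coded to 9292
    server.prepare_server(workdir="work_dir1", port=port, device="cpu")
    server.run_server()

Invoked, for example, as: python test_server.py imdb_model 9292 (the model directory name here is illustrative).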