From 2374dc4f7fbb649c6fb53a9330736b8292003b88 Mon Sep 17 00:00:00 2001
From: wangjiawei04
Date: Thu, 28 Jan 2021 20:21:52 +0800
Subject: [PATCH] fix code style

---
 core/general-server/op/general_reader_op.cpp  |  2 +-
 doc/LATEST_PACKAGES.md                        |  1 -
 java/README.md                                |  2 --
 .../include/fluid_cpu_engine.h                |  5 ++--
 .../include/fluid_gpu_engine.h                |  5 +---
 .../grpc_impl_example/fit_a_line/README_CN.md |  1 -
 .../fit_a_line/test_asyn_client.py            |  5 ++--
 .../fit_a_line/test_batch_client.py           |  3 ++-
 .../fit_a_line/test_sync_client.py            |  6 ++---
 .../fit_a_line/test_timeout_client.py         |  5 ++--
 .../grpc_impl_example/yolov4/test_client.py   |  3 ++-
 .../simple_web_service/web_service_java.py    |  6 +++--
 python/paddle_serving_client/__init__.py      |  2 +-
 python/paddle_serving_client/io/__init__.py   | 25 ++++++++++++-------
 python/paddle_serving_server/__init__.py      |  9 ++++---
 python/paddle_serving_server/serve.py         |  2 ++
 python/paddle_serving_server/web_service.py   |  8 +++---
 python/paddle_serving_server_gpu/__init__.py  | 12 ++++-----
 .../paddle_serving_server_gpu/web_service.py  |  4 ++-
 19 files changed, 59 insertions(+), 47 deletions(-)

diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index 24259e24..b46071a3 100644
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -91,7 +91,7 @@ int GeneralReaderOp::inference() {
   capacity.resize(var_num);
   for (int i = 0; i < var_num; ++i) {
     std::string tensor_name = model_config->_feed_name[i];
-    VLOG(2) << "(logid=" << log_id << ") get tensor name: " << tensor_name; 
+    VLOG(2) << "(logid=" << log_id << ") get tensor name: " << tensor_name;
     auto lod_tensor = InferManager::instance().GetInputHandle(
         engine_name.c_str(), tensor_name.c_str());
     std::vector> lod;
diff --git a/doc/LATEST_PACKAGES.md b/doc/LATEST_PACKAGES.md
index bc45cb2e..63f9f4e6 100644
--- a/doc/LATEST_PACKAGES.md
+++ b/doc/LATEST_PACKAGES.md
@@ -77,4 +77,3 @@ https://paddle-serving.bj.bcebos.com/whl/xpu/paddle_serving_client-0.0.0-cp36-no
 # App
 https://paddle-serving.bj.bcebos.com/whl/xpu/paddle_serving_app-0.0.0-py3-none-any.whl
 ```
-
diff --git a/java/README.md b/java/README.md
index a4934eb3..225c8b03 100644
--- a/java/README.md
+++ b/java/README.md
@@ -115,5 +115,3 @@ The second is to deploy GPU Serving and Java Client separately. If they are on t
 **Currently Serving has launched the Pipeline mode (see [Pipeline Serving](../doc/PIPELINE_SERVING.md) for details). 
 Pipeline Serving Client for Java is released.**
 **It should be noted that in the example, Java Pipeline Client code is in path /Java/Examples and /Java/src/main, and the Pipeline server code is in path /python/examples/pipeline/ The Client IP and Port(which is configured in java/examples/src/main/java/PipelineClientExample.java) should be corresponding to the Pipeline Server IP and Port(which is configured in config.yaml) **
-
-
diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index 10b962fd..681f2fe2 100644
--- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -295,7 +295,7 @@ class FluidCpuAnalysisEncryptCore : public FluidFamilyCore {
     std::string real_params_buffer =
        cipher->Decrypt(params_buffer, key_buffer);
     Config analysis_config;
-    //paddle::AnalysisConfig analysis_config;
+    // paddle::AnalysisConfig analysis_config;
     analysis_config.SetModelBuffer(&real_model_buffer[0],
                                    real_model_buffer.size(),
                                    &real_params_buffer[0],
@@ -308,8 +308,7 @@ class FluidCpuAnalysisEncryptCore : public FluidFamilyCore {
     analysis_config.SwitchSpecifyInputNames(true);
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     VLOG(2) << "decrypt model file sucess";
-    _core =
-        CreatePredictor(analysis_config);
+    _core = CreatePredictor(analysis_config);
     if (NULL == _core.get()) {
       LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
       return -1;
diff --git a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
index 7cac57a4..d3f63f72 100644
--- a/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
+++ b/paddle_inference/inferencer-fluid-gpu/include/fluid_gpu_engine.h
@@ -283,7 +283,6 @@ class Parameter {
   float* _params;
 };
 
-
 class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
  public:
   void ReadBinaryFile(const std::string& filename, std::string* contents) {
@@ -328,8 +327,7 @@ class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
     analysis_config.SwitchSpecifyInputNames(true);
     AutoLock lock(GlobalPaddleCreateMutex::instance());
     VLOG(2) << "decrypt model file sucess";
-    _core =
-        CreatePredictor(analysis_config);
+    _core = CreatePredictor(analysis_config);
     if (NULL == _core.get()) {
       LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
       return -1;
@@ -339,7 +337,6 @@ class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
   }
 };
 
-
 } // namespace fluid_gpu
 } // namespace paddle_serving
 } // namespace baidu
diff --git a/python/examples/grpc_impl_example/fit_a_line/README_CN.md b/python/examples/grpc_impl_example/fit_a_line/README_CN.md
index 4b2bd59e..728932b2 100644
--- a/python/examples/grpc_impl_example/fit_a_line/README_CN.md
+++ b/python/examples/grpc_impl_example/fit_a_line/README_CN.md
@@ -43,4 +43,3 @@ python test_batch_client.py
 ``` shell
 python test_timeout_client.py
 ```
-
diff --git a/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py b/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py
index eb0e1c2d..e9562359 100644
--- a/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py
+++ b/python/examples/grpc_impl_example/fit_a_line/test_asyn_client.py
@@ -43,8 +43,9 @@ x = [
 ]
 task_count = 0
 for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    future = client.predict(feed={"x": new_data}, fetch=["price"], batch=False, asyn=True)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    future = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False, asyn=True)
     task_count += 1
     future.add_done_callback(functools.partial(call_back))
 
diff --git a/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py b/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py
index 30da5934..41494e71 100644
--- a/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py
+++ b/python/examples/grpc_impl_example/fit_a_line/test_batch_client.py
@@ -27,7 +27,8 @@ for i in range(3):
     new_data = np.array(x).astype("float32").reshape((1, 1, 13))
     batch_data = np.concatenate([new_data, new_data, new_data], axis=0)
     print(batch_data.shape)
-    fetch_map = client.predict(feed={"x":batch_data}, fetch=["price"], batch=True)
+    fetch_map = client.predict(
+        feed={"x": batch_data}, fetch=["price"], batch=True)
     if fetch_map["serving_status_code"] == 0:
         print(fetch_map)
 
diff --git a/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py b/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py
index dbc9a7bb..879bc1ce 100644
--- a/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py
+++ b/python/examples/grpc_impl_example/fit_a_line/test_sync_client.py
@@ -17,7 +17,6 @@ from paddle_serving_client import MultiLangClient as Client
 import numpy as np
 client = Client()
 client.connect(["127.0.0.1:9393"])
-
 """
 for data in test_reader():
     new_data = np.zeros((1, 1, 13)).astype("float32")
@@ -33,8 +32,9 @@ x = [
     0.4919, 0.1856, 0.0795, -0.0332
 ]
 for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    fetch_map = client.predict(feed={"x": new_data}, fetch=["price"], batch=False)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    fetch_map = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False)
     if fetch_map["serving_status_code"] == 0:
         print(fetch_map)
     else:
diff --git a/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py b/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py
index 082fc908..3e9dcc90 100644
--- a/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py
+++ b/python/examples/grpc_impl_example/fit_a_line/test_timeout_client.py
@@ -25,8 +25,9 @@ x = [
     0.4919, 0.1856, 0.0795, -0.0332
 ]
 for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    fetch_map = client.predict(feed={"x": new_data}, fetch=["price"], batch=False)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    fetch_map = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False)
     if fetch_map["serving_status_code"] == 0:
         print(fetch_map)
     elif fetch_map["serving_status_code"] == grpc.StatusCode.DEADLINE_EXCEEDED:
diff --git a/python/examples/grpc_impl_example/yolov4/test_client.py b/python/examples/grpc_impl_example/yolov4/test_client.py
index 49573bb7..520d8bec 100644
--- a/python/examples/grpc_impl_example/yolov4/test_client.py
+++ b/python/examples/grpc_impl_example/yolov4/test_client.py
@@ -35,7 +35,8 @@ fetch_map = client.predict(
         "image": im,
         "im_size": np.array(list(im.shape[1:])),
     },
-    fetch=["save_infer_model/scale_0.tmp_0"], batch=False)
+    fetch=["save_infer_model/scale_0.tmp_0"],
+    batch=False)
 print(fetch_map)
 fetch_map.pop("serving_status_code")
 fetch_map["image"] = sys.argv[1]
diff --git a/python/examples/pipeline/simple_web_service/web_service_java.py b/python/examples/pipeline/simple_web_service/web_service_java.py
index ef6a1448..d06bc584 100644
--- a/python/examples/pipeline/simple_web_service/web_service_java.py
+++ b/python/examples/pipeline/simple_web_service/web_service_java.py
@@ -23,6 +23,8 @@ import base64
 
 _LOGGER = logging.getLogger()
 np.set_printoptions(threshold=sys.maxsize)
+
+
 class UciOp(Op):
     def init_op(self):
         self.separator = ","
@@ -38,8 +40,8 @@ class UciOp(Op):
                 log_id, input_dict))
         proc_dict = {}
         x_value = input_dict["x"]
-        input_dict["x"] = x_value.reshape(1,13)
-
+        input_dict["x"] = x_value.reshape(1, 13)
+
         return input_dict, False, None, ""
 
     def postprocess(self, input_dicts, fetch_dict, log_id):
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 047c97d7..4c9a9dea 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -228,7 +228,7 @@ class Client(object):
                 "You must set the endpoints parameter or use add_variant function to create a variant."
             )
         else:
-            if encryption: 
+            if encryption:
                 endpoints = self.get_serving_port(endpoints)
             if self.predictor_sdk_ is None:
                 self.add_variant('default_tag_{}'.format(id(self)), endpoints,
diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py
index f18d4b2b..b7b0898a 100644
--- a/python/paddle_serving_client/io/__init__.py
+++ b/python/paddle_serving_client/io/__init__.py
@@ -31,20 +31,24 @@ import paddle.nn.functional as F
 import errno
 from paddle.jit import to_static
 
+
 def save_dygraph_model(serving_model_folder, client_config_folder, model):
     paddle.jit.save(model, "serving_tmp")
-    loaded_layer = paddle.jit.load(path=".", model_filename="serving_tmp.pdmodel", params_filename="serving_tmp.pdiparams")
+    loaded_layer = paddle.jit.load(
+        path=".",
+        model_filename="serving_tmp.pdmodel",
+        params_filename="serving_tmp.pdiparams")
     feed_target_names = [x.name for x in loaded_layer._input_spec()]
     fetch_target_names = [x.name for x in loaded_layer._output_spec()]
     inference_program = loaded_layer.program()
 
     feed_var_dict = {
-        x: inference_program.global_block().var(x)
-        for x in feed_target_names
+        x: inference_program.global_block().var(x)
+        for x in feed_target_names
     }
     fetch_var_dict = {
-        x: inference_program.global_block().var(x)
-        for x in fetch_target_names
+        x: inference_program.global_block().var(x)
+        for x in fetch_target_names
     }
 
     config = model_conf.GeneralModelConfig()
@@ -93,9 +97,11 @@ def save_dygraph_model(serving_model_folder, client_config_folder, model):
     os.system(cmd)
     cmd = "mkdir -p {}".format(serving_model_folder)
     os.system(cmd)
-    cmd = "mv {} {}/__model__".format("serving_tmp.pdmodel", serving_model_folder)
+    cmd = "mv {} {}/__model__".format("serving_tmp.pdmodel",
+                                      serving_model_folder)
     os.system(cmd)
-    cmd = "mv {} {}/__params__".format("serving_tmp.pdiparams", serving_model_folder)
+    cmd = "mv {} {}/__params__".format("serving_tmp.pdiparams",
+                                       serving_model_folder)
     os.system(cmd)
     cmd = "rm -rf serving_tmp.pd*"
     os.system(cmd)
@@ -112,11 +118,12 @@ def save_dygraph_model(serving_model_folder, client_config_folder, model):
                 serving_model_folder), "wb") as fout:
         fout.write(config.SerializeToString())
 
+
 def save_model(server_model_folder,
                client_config_folder,
                feed_var_dict,
                fetch_var_dict,
-               main_program=None, 
+               main_program=None,
                encryption=False,
                key_len=128,
                encrypt_conf=None):
@@ -130,7 +137,7 @@ def save_model(server_model_folder,
         target_var_names.append(key)
 
     if not encryption:
-        save_inference_model( 
+        save_inference_model(
             server_model_folder,
             feed_var_names,
             target_vars,
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index ac58ed21..5ef3cf75 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -158,7 +158,7 @@ class Server(object):
         self.use_local_bin = False
         self.mkl_flag = False
         self.encryption_model = False
-        self.product_name = None 
+        self.product_name = None
         self.container_id = None
         self.model_config_paths = None  # for multi-model in a workflow
 
@@ -197,6 +197,7 @@ class Server(object):
     def set_ir_optimize(self, flag=False):
         self.ir_optimization = flag
 
+
     def use_encryption_model(self, flag=False):
         self.encryption_model = flag
 
@@ -236,15 +237,15 @@ class Server(object):
             if os.path.exists('{}/__params__'.format(model_config_path)):
                 suffix = ""
             else:
-                suffix = "_DIR" 
+                suffix = "_DIR"
 
             if device == "cpu":
-                if self.encryption_model: 
+                if self.encryption_model:
                     engine.type = "FLUID_CPU_ANALYSIS_ENCRYPT"
                 else:
                     engine.type = "FLUID_CPU_ANALYSIS" + suffix
             elif device == "gpu":
-                if self.encryption_model: 
+                if self.encryption_model:
                     engine.type = "FLUID_GPU_ANALYSIS_ENCRYPT"
                 else:
                     engine.type = "FLUID_GPU_ANALYSIS" + suffix
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 0fa7984b..a8f1ad29 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -133,6 +133,7 @@ def start_standard_model(serving_port):  # pylint: disable=doc-string-missing
     server.prepare_server(workdir=workdir, port=port, device=device)
     server.run_server()
 
+
 class MainService(BaseHTTPRequestHandler):
     def get_available_port(self):
         default_port = 12000
@@ -200,6 +201,7 @@ class MainService(BaseHTTPRequestHandler):
         self.end_headers()
         self.wfile.write(json.dumps(response))
 
+
 if __name__ == "__main__":
     args = parse_args()
 
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index f1eb8409..3be818f0 100644
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -120,7 +120,7 @@ class WebService(object):
         self.mem_optim = mem_optim
         self.ir_optim = ir_optim
         for i in range(1000):
-            if port_is_available(default_port + i): 
+            if port_is_available(default_port + i):
                 self.port_list.append(default_port + i)
                 break
 
@@ -216,10 +216,12 @@ class WebService(object):
             feed_dict[var_name] = []
         for feed_ins in feed:
             for key in feed_ins:
-                feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:])
+                feed_dict[key].append(
+                    np.array(feed_ins[key]).reshape(
+                        list(self.feed_vars[key].shape))[np.newaxis, :])
         feed = {}
         for key in feed_dict:
-            feed[key] = np.concatenate(feed_dict[key], axis=0) 
+            feed[key] = np.concatenate(feed_dict[key], axis=0)
         return feed, fetch, is_batch
 
     def postprocess(self, feed=[], fetch=[], fetch_map=None):
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index f951d321..04566bfa 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -323,20 +323,20 @@ class Server(object):
             if os.path.exists('{}/__params__'.format(model_config_path)):
                 suffix = ""
             else:
-                suffix = "_DIR" 
+                suffix = "_DIR"
             if device == "arm":
                 engine.use_lite = self.use_lite
                 engine.use_xpu = self.use_xpu
             if device == "cpu":
-                if use_encryption_model: 
+                if use_encryption_model:
                     engine.type = "FLUID_CPU_ANALYSIS_ENCRPT"
                 else:
-                    engine.type = "FLUID_CPU_ANALYSIS"+suffix
+                    engine.type = "FLUID_CPU_ANALYSIS" + suffix
elif device == "gpu": - if use_encryption_model: + if use_encryption_model: engine.type = "FLUID_GPU_ANALYSIS_ENCRPT" else: - engine.type = "FLUID_GPU_ANALYSIS"+suffix + engine.type = "FLUID_GPU_ANALYSIS" + suffix elif device == "arm": engine.type = "FLUID_ARM_ANALYSIS" + suffix self.model_toolkit_conf.engines.extend([engine]) @@ -496,7 +496,7 @@ class Server(object): workdir=None, port=9292, device="cpu", - use_encryption_model=False, + use_encryption_model=False, cube_conf=None): if workdir == None: workdir = "./tmp" diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py index ec1f93ef..67b78926 100644 --- a/python/paddle_serving_server_gpu/web_service.py +++ b/python/paddle_serving_server_gpu/web_service.py @@ -295,7 +295,9 @@ class WebService(object): feed_dict[var_name] = [] for feed_ins in feed: for key in feed_ins: - feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:]) + feed_dict[key].append( + np.array(feed_ins[key]).reshape( + list(self.feed_vars[key].shape))[np.newaxis, :]) feed = {} for key in feed_dict: feed[key] = np.concatenate(feed_dict[key], axis=0) -- GitLab