Unverified commit 6def4bcd authored by Jiawei Wang, committed by GitHub

Merge pull request #999 from wangjiawei04/codestyle

fix Codestyle
@@ -91,7 +91,7 @@ int GeneralReaderOp::inference() {
  capacity.resize(var_num);
  for (int i = 0; i < var_num; ++i) {
    std::string tensor_name = model_config->_feed_name[i];
    VLOG(2) << "(logid=" << log_id << ") get tensor name: " << tensor_name;
    auto lod_tensor = InferManager::instance().GetInputHandle(
        engine_name.c_str(), tensor_name.c_str());
    std::vector<std::vector<size_t>> lod;
...
@@ -77,4 +77,3 @@ https://paddle-serving.bj.bcebos.com/whl/xpu/paddle_serving_client-0.0.0-cp36-no
# App
https://paddle-serving.bj.bcebos.com/whl/xpu/paddle_serving_app-0.0.0-py3-none-any.whl
```
@@ -115,5 +115,3 @@ The second is to deploy GPU Serving and Java Client separately. If they are on t
**Currently Serving has launched the Pipeline mode (see [Pipeline Serving](../doc/PIPELINE_SERVING.md) for details). Pipeline Serving Client for Java is released.**
**It should be noted that in the example, Java Pipeline Client code is in path /Java/Examples and /Java/src/main, and the Pipeline server code is in path /python/examples/pipeline/ The Client IP and Port(which is configured in java/examples/src/main/java/PipelineClientExample.java) should be corresponding to the Pipeline Server IP and Port(which is configured in config.yaml) **
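As a rough illustration of the IP/Port correspondence described above, the following minimal Python sketch reads the Pipeline server's config.yaml and prints the endpoint the Java client would have to connect to. The `rpc_port` and `ip` keys are assumptions about a typical pipeline config, not something this PR defines.

```python
# Minimal sketch, assuming the pipeline config.yaml exposes `rpc_port` (and
# optionally `ip`); adjust the keys to whatever the real config uses.
import yaml

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

ip = conf.get("ip", "127.0.0.1")   # host the Pipeline server binds to
port = conf["rpc_port"]            # port the Pipeline server listens on

# The Java client (PipelineClientExample.java) must connect to exactly this
# endpoint or its requests will never reach the Pipeline server.
print("connect the Pipeline client to {}:{}".format(ip, port))
```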
@@ -295,7 +295,7 @@ class FluidCpuAnalysisEncryptCore : public FluidFamilyCore {
    std::string real_params_buffer = cipher->Decrypt(params_buffer, key_buffer);
    Config analysis_config;
-   //paddle::AnalysisConfig analysis_config;
+   // paddle::AnalysisConfig analysis_config;
    analysis_config.SetModelBuffer(&real_model_buffer[0],
                                   real_model_buffer.size(),
                                   &real_params_buffer[0],
@@ -308,8 +308,7 @@ class FluidCpuAnalysisEncryptCore : public FluidFamilyCore {
    analysis_config.SwitchSpecifyInputNames(true);
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    VLOG(2) << "decrypt model file sucess";
-   _core =
-       CreatePredictor(analysis_config);
+   _core = CreatePredictor(analysis_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
...
@@ -283,7 +283,6 @@ class Parameter {
  float* _params;
};
class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
 public:
  void ReadBinaryFile(const std::string& filename, std::string* contents) {
@@ -328,8 +327,7 @@ class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
    analysis_config.SwitchSpecifyInputNames(true);
    AutoLock lock(GlobalPaddleCreateMutex::instance());
    VLOG(2) << "decrypt model file sucess";
-   _core =
-       CreatePredictor(analysis_config);
+   _core = CreatePredictor(analysis_config);
    if (NULL == _core.get()) {
      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
      return -1;
@@ -339,7 +337,6 @@ class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
  }
};
}  // namespace fluid_gpu
}  // namespace paddle_serving
}  // namespace baidu
...
@@ -43,4 +43,3 @@ python test_batch_client.py
``` shell
python test_timeout_client.py
```
@@ -43,8 +43,9 @@ x = [
]
task_count = 0
for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    future = client.predict(feed={"x": new_data}, fetch=["price"], batch=False, asyn=True)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    future = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False, asyn=True)
    task_count += 1
    future.add_done_callback(functools.partial(call_back))
...
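Assembled into a self-contained script, the asynchronous call pattern touched by this hunk looks roughly like the sketch below. The server endpoint (127.0.0.1:9393), the placeholder feature values, and the "x"/"price" variable names are assumptions for illustration; only the asyn=True / add_done_callback usage comes from the example above.

```python
import functools
import numpy as np
from paddle_serving_client import MultiLangClient as Client


def call_back(future):
    # The future is assumed to behave like a gRPC future and expose result().
    fetch_map = future.result()
    print(fetch_map)


# Any 13 float features will do for illustration; the real example values
# live in the fit_a_line sample data.
x = [0.1] * 13

client = Client()
client.connect(["127.0.0.1:9393"])  # assumed serving endpoint

for i in range(3):
    new_data = np.array(x).astype("float32").reshape((1, 13))
    # asyn=True makes predict() return a future instead of a fetch_map.
    future = client.predict(
        feed={"x": new_data}, fetch=["price"], batch=False, asyn=True)
    future.add_done_callback(functools.partial(call_back))
```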
@@ -27,7 +27,8 @@ for i in range(3):
    new_data = np.array(x).astype("float32").reshape((1, 1, 13))
    batch_data = np.concatenate([new_data, new_data, new_data], axis=0)
    print(batch_data.shape)
-   fetch_map = client.predict(feed={"x":batch_data}, fetch=["price"], batch=True)
+   fetch_map = client.predict(
+       feed={"x": batch_data}, fetch=["price"], batch=True)
    if fetch_map["serving_status_code"] == 0:
        print(fetch_map)
...
@@ -17,7 +17,6 @@ from paddle_serving_client import MultiLangClient as Client
import numpy as np
client = Client()
client.connect(["127.0.0.1:9393"])
"""
for data in test_reader():
    new_data = np.zeros((1, 1, 13)).astype("float32")
@@ -33,8 +32,9 @@ x = [
    0.4919, 0.1856, 0.0795, -0.0332
]
for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    fetch_map = client.predict(feed={"x": new_data}, fetch=["price"], batch=False)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    fetch_map = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False)
    if fetch_map["serving_status_code"] == 0:
        print(fetch_map)
    else:
...
@@ -25,8 +25,9 @@ x = [
    0.4919, 0.1856, 0.0795, -0.0332
]
for i in range(3):
-    new_data = np.array(x).astype("float32").reshape((1,13))
-    fetch_map = client.predict(feed={"x": new_data}, fetch=["price"], batch=False)
+    new_data = np.array(x).astype("float32").reshape((1, 13))
+    fetch_map = client.predict(
+        feed={"x": new_data}, fetch=["price"], batch=False)
    if fetch_map["serving_status_code"] == 0:
        print(fetch_map)
    elif fetch_map["serving_status_code"] == grpc.StatusCode.DEADLINE_EXCEEDED:
...
@@ -35,7 +35,8 @@ fetch_map = client.predict(
        "image": im,
        "im_size": np.array(list(im.shape[1:])),
    },
-   fetch=["save_infer_model/scale_0.tmp_0"], batch=False)
+   fetch=["save_infer_model/scale_0.tmp_0"],
+   batch=False)
print(fetch_map)
fetch_map.pop("serving_status_code")
fetch_map["image"] = sys.argv[1]
...
@@ -23,6 +23,8 @@ import base64
_LOGGER = logging.getLogger()
np.set_printoptions(threshold=sys.maxsize)
class UciOp(Op):
    def init_op(self):
        self.separator = ","
@@ -38,8 +40,8 @@ class UciOp(Op):
            log_id, input_dict))
        proc_dict = {}
        x_value = input_dict["x"]
-       input_dict["x"] = x_value.reshape(1,13)
+       input_dict["x"] = x_value.reshape(1, 13)
        return input_dict, False, None, ""
    def postprocess(self, input_dicts, fetch_dict, log_id):
...
@@ -228,7 +228,7 @@ class Client(object):
                "You must set the endpoints parameter or use add_variant function to create a variant."
            )
        else:
            if encryption:
                endpoints = self.get_serving_port(endpoints)
            if self.predictor_sdk_ is None:
                self.add_variant('default_tag_{}'.format(id(self)), endpoints,
...
@@ -31,20 +31,24 @@ import paddle.nn.functional as F
import errno
from paddle.jit import to_static
def save_dygraph_model(serving_model_folder, client_config_folder, model):
    paddle.jit.save(model, "serving_tmp")
-   loaded_layer = paddle.jit.load(path=".", model_filename="serving_tmp.pdmodel", params_filename="serving_tmp.pdiparams")
+   loaded_layer = paddle.jit.load(
+       path=".",
+       model_filename="serving_tmp.pdmodel",
+       params_filename="serving_tmp.pdiparams")
    feed_target_names = [x.name for x in loaded_layer._input_spec()]
    fetch_target_names = [x.name for x in loaded_layer._output_spec()]
    inference_program = loaded_layer.program()
    feed_var_dict = {
        x: inference_program.global_block().var(x)
        for x in feed_target_names
    }
    fetch_var_dict = {
        x: inference_program.global_block().var(x)
        for x in fetch_target_names
    }
    config = model_conf.GeneralModelConfig()
@@ -93,9 +97,11 @@ def save_dygraph_model(serving_model_folder, client_config_folder, model):
    os.system(cmd)
    cmd = "mkdir -p {}".format(serving_model_folder)
    os.system(cmd)
-   cmd = "mv {} {}/__model__".format("serving_tmp.pdmodel", serving_model_folder)
+   cmd = "mv {} {}/__model__".format("serving_tmp.pdmodel",
+                                     serving_model_folder)
    os.system(cmd)
-   cmd = "mv {} {}/__params__".format("serving_tmp.pdiparams", serving_model_folder)
+   cmd = "mv {} {}/__params__".format("serving_tmp.pdiparams",
+                                      serving_model_folder)
    os.system(cmd)
    cmd = "rm -rf serving_tmp.pd*"
    os.system(cmd)
@@ -112,11 +118,12 @@ def save_dygraph_model(serving_model_folder, client_config_folder, model):
            serving_model_folder), "wb") as fout:
        fout.write(config.SerializeToString())
def save_model(server_model_folder,
               client_config_folder,
               feed_var_dict,
               fetch_var_dict,
               main_program=None,
               encryption=False,
               key_len=128,
               encrypt_conf=None):
@@ -130,7 +137,7 @@ def save_model(server_model_folder,
        target_var_names.append(key)
    if not encryption:
        save_inference_model(
            server_model_folder,
            feed_var_names,
            target_vars,
...
@@ -158,7 +158,7 @@ class Server(object):
        self.use_local_bin = False
        self.mkl_flag = False
        self.encryption_model = False
        self.product_name = None
        self.container_id = None
        self.model_config_paths = None # for multi-model in a workflow
@@ -197,6 +197,7 @@ class Server(object):
    def set_ir_optimize(self, flag=False):
        self.ir_optimization = flag
    def use_encryption_model(self, flag=False):
        self.encryption_model = flag
@@ -236,15 +237,15 @@ class Server(object):
            if os.path.exists('{}/__params__'.format(model_config_path)):
                suffix = ""
            else:
                suffix = "_DIR"
            if device == "cpu":
                if self.encryption_model:
                    engine.type = "FLUID_CPU_ANALYSIS_ENCRYPT"
                else:
                    engine.type = "FLUID_CPU_ANALYSIS" + suffix
            elif device == "gpu":
                if self.encryption_model:
                    engine.type = "FLUID_GPU_ANALYSIS_ENCRYPT"
                else:
                    engine.type = "FLUID_GPU_ANALYSIS" + suffix
...
@@ -133,6 +133,7 @@ def start_standard_model(serving_port): # pylint: disable=doc-string-missing
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()
class MainService(BaseHTTPRequestHandler):
    def get_available_port(self):
        default_port = 12000
@@ -200,6 +201,7 @@ class MainService(BaseHTTPRequestHandler):
        self.end_headers()
        self.wfile.write(json.dumps(response))
if __name__ == "__main__":
    args = parse_args()
...
@@ -120,7 +120,7 @@ class WebService(object):
        self.mem_optim = mem_optim
        self.ir_optim = ir_optim
        for i in range(1000):
            if port_is_available(default_port + i):
                self.port_list.append(default_port + i)
                break
@@ -216,10 +216,12 @@ class WebService(object):
            feed_dict[var_name] = []
        for feed_ins in feed:
            for key in feed_ins:
-               feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:])
+               feed_dict[key].append(
+                   np.array(feed_ins[key]).reshape(
+                       list(self.feed_vars[key].shape))[np.newaxis, :])
        feed = {}
        for key in feed_dict:
            feed[key] = np.concatenate(feed_dict[key], axis=0)
        return feed, fetch, is_batch
    def postprocess(self, feed=[], fetch=[], fetch_map=None):
...
@@ -323,20 +323,20 @@ class Server(object):
            if os.path.exists('{}/__params__'.format(model_config_path)):
                suffix = ""
            else:
                suffix = "_DIR"
            if device == "arm":
                engine.use_lite = self.use_lite
                engine.use_xpu = self.use_xpu
            if device == "cpu":
                if use_encryption_model:
                    engine.type = "FLUID_CPU_ANALYSIS_ENCRPT"
                else:
-                   engine.type = "FLUID_CPU_ANALYSIS"+suffix
+                   engine.type = "FLUID_CPU_ANALYSIS" + suffix
            elif device == "gpu":
                if use_encryption_model:
                    engine.type = "FLUID_GPU_ANALYSIS_ENCRPT"
                else:
-                   engine.type = "FLUID_GPU_ANALYSIS"+suffix
+                   engine.type = "FLUID_GPU_ANALYSIS" + suffix
            elif device == "arm":
                engine.type = "FLUID_ARM_ANALYSIS" + suffix
            self.model_toolkit_conf.engines.extend([engine])
@@ -496,7 +496,7 @@ class Server(object):
                          workdir=None,
                          port=9292,
                          device="cpu",
                          use_encryption_model=False,
                          cube_conf=None):
        if workdir == None:
            workdir = "./tmp"
...
@@ -295,7 +295,9 @@ class WebService(object):
            feed_dict[var_name] = []
        for feed_ins in feed:
            for key in feed_ins:
-               feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:])
+               feed_dict[key].append(
+                   np.array(feed_ins[key]).reshape(
+                       list(self.feed_vars[key].shape))[np.newaxis, :])
        feed = {}
        for key in feed_dict:
            feed[key] = np.concatenate(feed_dict[key], axis=0)
...
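For readers skimming the diff, the batching logic that the last two hunks reformat can be summarized by the standalone numpy sketch below: each feed instance is reshaped to the declared variable shape, given a leading batch axis via np.newaxis, and the instances are then concatenated along that axis. The variable name and the (13,) shape are illustrative assumptions.

```python
# Standalone illustration of the feed-batching pattern above (the "x" name and
# the (13,) shape are made up; the real shape comes from self.feed_vars[key].shape).
import numpy as np

feed = [{"x": list(range(13))}, {"x": list(range(13, 26))}]  # two feed instances
var_shape = [13]                                             # declared shape of "x"

feed_dict = {"x": []}
for feed_ins in feed:
    for key in feed_ins:
        # Reshape one instance to the declared shape, then prepend a batch axis.
        feed_dict[key].append(
            np.array(feed_ins[key]).reshape(var_shape)[np.newaxis, :])

batched = {key: np.concatenate(vals, axis=0) for key, vals in feed_dict.items()}
print(batched["x"].shape)  # (2, 13): two instances stacked along the batch axis
```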