From 8c7956ba002df8e529c8e908125937fffc2db7c6 Mon Sep 17 00:00:00 2001
From: MRXLT
Date: Tue, 16 Jun 2020 11:26:37 +0800
Subject: [PATCH] encry

---
 .../include/fluid_cpu_engine.h            | 64 +++++++++++++++++++
 python/paddle_serving_server/__init__.py  | 31 ++++++++
 python/paddle_serving_server/serve.py     | 35 ++++++++++-
 3 files changed, 129 insertions(+), 1 deletion(-)

diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index a4d8dda7..6dd5534f 100644
--- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -19,6 +19,7 @@
 #include <map>
 #include <string>
 #include <vector>
+#include "cipher_utils.h"  // NOLINT
 #include "core/configure/include/configure_parser.h"
 #include "core/configure/inferencer_configure.pb.h"
 #include "core/predictor/framework/infer.h"
@@ -531,6 +532,69 @@ class FluidCpuAnalysisDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
   }
 };
 
+class FluidCpuAnalysisEncryCore : public FluidFamilyCore {
+ public:
+  int create(const predictor::InferEngineCreationParams& params) {
+    std::string data_path = params.get_path();
+    if (access(data_path.c_str(), F_OK) == -1) {
+      LOG(ERROR) << "create paddle predictor failed, path does not exist: "
+                 << data_path;
+      return -1;
+    }
+    std::ifstream model_file(data_path + "encry_model",
+                             std::ios::in | std::ios::binary);
+    std::string model_string;
+    if (model_file.is_open()) {
+      std::istreambuf_iterator<char> begin(model_file), end;
+      model_string = std::string(begin, end);
+      model_file.close();
+    }
+    std::ifstream params_file(data_path + "encry_params",
+                              std::ios::in | std::ios::binary);
+    std::string params_string;
+    if (params_file.is_open()) {
+      std::istreambuf_iterator<char> begin(params_file), end;
+      params_string = std::string(begin, end);
+      params_file.close();
+    }
+    std::ifstream key_file(data_path + "key", std::ios::in | std::ios::binary);
+    std::string key_string;
+    if (key_file.is_open()) {
+      std::istreambuf_iterator<char> begin(key_file), end;
+      key_string = std::string(begin, end);
+      key_file.close();
+    }
+
+    auto cipher = paddle::CipherFactory::CreateCipher();
+    std::string real_model_string = cipher->Decrypt(model_string, key_string);
+    std::string real_params_string = cipher->Decrypt(params_string, key_string);
+
+    const char* real_model_buffer = real_model_string.c_str();
+    const char* real_params_buffer = real_params_string.c_str();
+
+    paddle::AnalysisConfig analysis_config;
+    analysis_config.SetModelBuffer(real_model_buffer,
+                                   real_model_string.size(),
+                                   real_params_buffer,
+                                   real_params_string.size());
+    analysis_config.DisableGpu();
+    analysis_config.SetCpuMathLibraryNumThreads(1);
+
+    if (params.enable_memory_optimization()) {
+      analysis_config.EnableMemoryOptim();
+    }
+    analysis_config.SwitchSpecifyInputNames(true);
+    AutoLock lock(GlobalPaddleCreateMutex::instance());
+    _core =
+        paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(analysis_config);
+    if (NULL == _core.get()) {
+      LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
+      return -1;
+    }
+    VLOG(2) << "create paddle predictor success, path: " << data_path;
+    return 0;
+  }
+};
 }  // namespace fluid_cpu
 }  // namespace paddle_serving
 }  // namespace baidu
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 3a5c0701..5ee4bf9a 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -593,3 +593,34 @@ class MultiLangServer(object):
         server.start()
         p_bserver.join()
         server.wait_for_termination()
+
+
+from BaseHTTPServer import BaseHTTPRequestHandler
+import urllib
+import json
+from .serve import start_standard_model
+from multiprocessing import Process
+
+
+class MainService(BaseHTTPRequestHandler):
+    def _set_headers(self):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+
+    def do_GET(self):
+        response = {'status': 'SUCCESS', 'data': 'hello from server'}
+
+        self._set_headers()
+        self.wfile.write(json.dumps(response))
+
+    def do_POST(self):
+        path = self.path
+        print(path)
+        content_length = int(self.headers['Content-Length'])
+        post_data = self.rfile.read(content_length)
+        print(post_data)
+        Process(target=start_standard_model).start()  # run serving in background
+        response = {"endpoint_list": ["9292"]}
+        self._set_headers()
+        self.wfile.write(json.dumps(response))
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index e75240df..baf125d1 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -97,11 +97,44 @@ def start_standard_model():  # pylint: disable=doc-string-missing
     server.run_server()
 
 
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+import urllib
+import json
+from multiprocessing import Process
+
+
+class MainService(BaseHTTPRequestHandler):
+    def _set_headers(self):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+
+    def do_GET(self):
+        response = {'status': 'SUCCESS', 'data': 'hello from server'}
+
+        self._set_headers()
+        self.wfile.write(json.dumps(response))
+
+    def do_POST(self):
+        path = self.path
+        print(path)
+        content_length = int(self.headers['Content-Length'])
+        post_data = self.rfile.read(content_length)
+        print(post_data)
+        Process(target=start_standard_model).start()  # run serving in background
+        response = {"endpoint_list": ["9292"]}
+        self._set_headers()
+        self.wfile.write(json.dumps(response))
+
+
 if __name__ == "__main__":
     args = parse_args()
     if args.name == "None":
-        start_standard_model()
+        #start_standard_model()
+        server = HTTPServer(('', int(args.port)), MainService)
+        server.serve_forever()
+
     else:
         service = WebService(name=args.name)
         service.load_model_config(args.model)
-- 
GitLab
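
A minimal client sketch for the management endpoint this patch adds in serve.py, assuming the server was started with name left as "None" so MainService listens on args.port (the port 9393, the empty POST body, and the use of the third-party `requests` package are assumptions, not part of the patch):

    # Sketch only: exercises the MainService handler added in serve.py.
    # Port 9393 is an arbitrary example; the handler ignores the POST body.
    import json
    import requests

    # do_GET returns a fixed status payload
    print(requests.get("http://127.0.0.1:9393/").json())

    # do_POST starts the standard model server in the background and
    # always replies with the fixed endpoint list ["9292"]
    resp = requests.post("http://127.0.0.1:9393/", data=json.dumps({}))
    print(resp.json()["endpoint_list"])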