diff --git a/core/configure/proto/server_configure.proto b/core/configure/proto/server_configure.proto
index 8956022685090c94be2037445c646e9fbffd1a5c..de32637b2a523df1a8d8cd2e28dcf29e79ff96dc 100644
--- a/core/configure/proto/server_configure.proto
+++ b/core/configure/proto/server_configure.proto
@@ -58,6 +58,8 @@ message ResourceConf {
   optional string cube_config_path = 5;
   optional string cube_config_file = 6;
   optional int32 cube_quant_bits = 7;  // set 0 if no quant.
+  optional string auth_product_name = 8;
+  optional string auth_container_id = 9;
 };
 
 // DAG node depency info
diff --git a/core/cube/cube-api/include/meta.h b/core/cube/cube-api/include/meta.h
index 69bbb8ccc12e423d286183ed5dd87e90bf2e59de..ec872a38d8b0294f7b06e8557848f6e8ca79aa2b 100644
--- a/core/cube/cube-api/include/meta.h
+++ b/core/cube/cube-api/include/meta.h
@@ -22,7 +22,8 @@
 #ifdef BCLOUD
 #include "baidu/rpc/channel.h"
 #include "baidu/rpc/parallel_channel.h"
-#include "rapidjson/document.h"
+#include "rapidjson_1.0/document.h"
+#include "rapidjson_1.0/rapidjson.h"
 #else
 #include "brpc/channel.h"
 #include "brpc/parallel_channel.h"
diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index 9f709c71045577f7b043777a7ad1528a0e2ccc28..5cd03394520ed49aa8c81307a6b94923300f0cb4 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -39,7 +39,9 @@ using configure::GeneralModelConfig;
 
 void PredictorClient::init_gflags(std::vector argv) {
   std::call_once(gflags_init_flag, [&]() {
+#ifndef BCLOUD
     FLAGS_logtostderr = true;
+#endif
    argv.insert(argv.begin(), "dummy");
     int argc = argv.size();
     char **arr = new char *[argv.size()];
diff --git a/core/general-server/op/general_copy_op.h b/core/general-server/op/general_copy_op.h
index 89627ffb9e4d15bbcbfa6c7fc3a608ada03dad6e..9b4caadc6a82f1f1a601ab66394b3f629af703ff 100644
--- a/core/general-server/op/general_copy_op.h
+++ b/core/general-server/op/general_copy_op.h
@@ -13,20 +13,12 @@
 // limitations under the License.
 
 #pragma once
-#include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include
+#include
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_dist_kv_infer_op.h b/core/general-server/op/general_dist_kv_infer_op.h
index 2dee5bca6f9e12dbb8b36a6c39aa0a8e77763d23..56d19ee366feaf000d7b24f4017b39155b7e65c1 100644
--- a/core/general-server/op/general_dist_kv_infer_op.h
+++ b/core/general-server/op/general_dist_kv_infer_op.h
@@ -15,17 +15,9 @@
 #pragma once
 #include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_dist_kv_quant_infer_op.h b/core/general-server/op/general_dist_kv_quant_infer_op.h
index e153311a2a2e2df1bd12720e2ce6cbe9ddb31ec0..0f99e2072374bc4bc0b76a1ca876a152f98488b6 100644
--- a/core/general-server/op/general_dist_kv_quant_infer_op.h
+++ b/core/general-server/op/general_dist_kv_quant_infer_op.h
@@ -15,17 +15,9 @@
 #pragma once
 #include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_infer_helper.h b/core/general-server/op/general_infer_helper.h
index 4fa1995664a2dca449ebc228079c86919a32d328..520ee77e23276d819e641ccfab8e4eec5ac87a0c 100644
--- a/core/general-server/op/general_infer_helper.h
+++ b/core/general-server/op/general_infer_helper.h
@@ -15,17 +15,9 @@
 #pragma once
 
 #include
+#include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
 #include "paddle_inference_api.h"  // NOLINT
-#endif
-#include
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_infer_op.cpp b/core/general-server/op/general_infer_op.cpp
index a9ff2e7226b25842889e391d82217b3b6a140170..d83528a279188d463b8a273930b13de51da43f24 100644
--- a/core/general-server/op/general_infer_op.cpp
+++ b/core/general-server/op/general_infer_op.cpp
@@ -57,10 +57,10 @@ int GeneralInferOp::inference() {
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = &output_blob->tensor_vector;
 
-  int batch_size = input_blob->GetBatchSize();
+  int batch_size = input_blob->_batch_size;
   VLOG(2) << "input batch size: " << batch_size;
 
-  output_blob->SetBatchSize(batch_size);
+  output_blob->_batch_size = batch_size;
 
   VLOG(2) << "infer batch size: " << batch_size;
diff --git a/core/general-server/op/general_infer_op.h b/core/general-server/op/general_infer_op.h
index ff0b210ad7c6824a7e8a61e9ac504a65eafa4c58..b41784185ff445c540774b8b24ef897caf6fbf96 100644
--- a/core/general-server/op/general_infer_op.h
+++ b/core/general-server/op/general_infer_op.h
@@ -15,17 +15,9 @@
 #pragma once
 #include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_reader_op.cpp b/core/general-server/op/general_reader_op.cpp
index 380f861606a7719a33407dd946c5ac476629fdb7..c8721f09ad099ebe6b3a8816272107fa9737cd81 100644
--- a/core/general-server/op/general_reader_op.cpp
+++ b/core/general-server/op/general_reader_op.cpp
@@ -72,7 +72,9 @@ int conf_check(const Request *req,
 int GeneralReaderOp::inference() {
   // reade request from client
   const Request *req = dynamic_cast(get_request_message());
-
+  VLOG(2) << "start to call load general model_conf op";
+  baidu::paddle_serving::predictor::Resource &resource =
+      baidu::paddle_serving::predictor::Resource::instance();
   int batch_size = req->insts_size();
   int input_var_num = 0;
   std::vector elem_type;
@@ -82,8 +84,6 @@ int GeneralReaderOp::inference() {
   GeneralBlob *res = mutable_data();
   TensorVector *out = &res->tensor_vector;
 
-  res->SetBatchSize(batch_size);
-
   if (!res) {
     LOG(ERROR) << "Failed get op tls reader object output";
   }
@@ -93,10 +93,6 @@ int GeneralReaderOp::inference() {
   int var_num = req->insts(0).tensor_array_size();
   VLOG(2) << "var num: " << var_num;
 
-  VLOG(2) << "start to call load general model_conf op";
-  baidu::paddle_serving::predictor::Resource &resource =
-      baidu::paddle_serving::predictor::Resource::instance();
-
   VLOG(2) << "get resource pointer done.";
   std::shared_ptr model_config =
       resource.get_general_model_config();
@@ -257,6 +253,7 @@ int GeneralReaderOp::inference() {
   timeline.Pause();
   int64_t end = timeline.TimeStampUS();
   res->p_size = 0;
+  res->_batch_size = batch_size;
   AddBlobInfo(res, start);
   AddBlobInfo(res, end);
diff --git a/core/general-server/op/general_reader_op.h b/core/general-server/op/general_reader_op.h
index c45d6ad5139a7a9a267f1c6556028a99295500de..cb9693982ff659214dd21ff09f189f86b6b3a339 100644
--- a/core/general-server/op/general_reader_op.h
+++ b/core/general-server/op/general_reader_op.h
@@ -13,21 +13,13 @@
 // limitations under the License.
 
 #pragma once
-#include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include
+#include
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_response_op.h b/core/general-server/op/general_response_op.h
index 4b0f6ed17b5a66dbda7bccef25cec03bf044e6c5..0f72b8f98df336dd515560129a8cfd27650738bb 100644
--- a/core/general-server/op/general_response_op.h
+++ b/core/general-server/op/general_response_op.h
@@ -15,16 +15,8 @@
 #pragma once
 #include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_text_reader_op.h b/core/general-server/op/general_text_reader_op.h
index ca134256fce4aaa003f4b07033d4c471ebdb59b7..af822993dc37fae23c1fa584d640cbfe8d9950c8 100644
--- a/core/general-server/op/general_text_reader_op.h
+++ b/core/general-server/op/general_text_reader_op.h
@@ -13,21 +13,13 @@
 // limitations under the License.
 
 #pragma once
-#include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include
+#include
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/general-server/op/general_text_response_op.h b/core/general-server/op/general_text_response_op.h
index 52f7bbf0f7d76122bad14cf513302f70c35aa1d8..334d98476e67f745635f7d66d7b8682de62da355 100644
--- a/core/general-server/op/general_text_response_op.h
+++ b/core/general-server/op/general_text_response_op.h
@@ -15,17 +15,9 @@
 #pragma once
 #include
 #include
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/core/predictor/common/inner_common.h b/core/predictor/common/inner_common.h
index 96b8a8027070da559e239cdc5f6057d534ff3412..f6847146ba14b2b9fc1b07485c748e6e8300d7bd 100644
--- a/core/predictor/common/inner_common.h
+++ b/core/predictor/common/inner_common.h
@@ -50,7 +50,7 @@
 #include "butil/time.h"
 #endif
 
-#include "glog/raw_logging.h"
+#define ERROR_STRING_LEN 10240
 
 #include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
diff --git a/core/predictor/framework/factory.h b/core/predictor/framework/factory.h
index 8d5fc9a1c40b047351f38a1136728ee179a191ed..826b1134c1310cce97812d5d9840b40297ec67a3 100644
--- a/core/predictor/framework/factory.h
+++ b/core/predictor/framework/factory.h
@@ -17,21 +17,24 @@
 #include
 #include
 #include "core/predictor/common/inner_common.h"
-#include "glog/raw_logging.h"
+
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
 
 //////////////// DECLARE INTERFACE ////////////////
-#define DECLARE_FACTORY_OBJECT(D, B)                                      \
-  static int regist(const std::string& tag) {                             \
-    FactoryDerive* factory = new (std::nothrow) FactoryDerive();          \
-    if (factory == NULL ||                                                \
-        FactoryPool::instance().register_factory(tag, factory) != 0) {    \
-      RAW_LOG_FATAL("Failed regist factory: %s in macro!", #D);           \
-      return -1;                                                          \
-    }                                                                     \
-    return 0;                                                             \
+#define DECLARE_FACTORY_OBJECT(D, B)                                      \
+  static int regist(const std::string& tag) {                             \
+    FactoryDerive* factory = new (std::nothrow) FactoryDerive();          \
+    if (factory == NULL ||                                                \
+        FactoryPool::instance().register_factory(tag, factory) != 0) {    \
+      char err_str[ERROR_STRING_LEN];                                     \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                             \
+               "Failed regist factory: %s in macro!", #D);                \
+      RAW_LOG(FATAL, err_str);                                            \
+      return -1;                                                          \
+    }                                                                     \
+    return 0;                                                             \
   }
 
 #define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
@@ -54,7 +57,10 @@ namespace predictor {
     if (factory == NULL ||                                                \
        ::baidu::paddle_serving::predictor::FactoryPool::instance()        \
             .register_factory(#D, factory) != 0) {                        \
-      RAW_LOG_FATAL("Failed regist factory: %s->%s in macro!", #D, #B);   \
+      char err_str[ERROR_STRING_LEN];                                     \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                             \
+               "Failed regist factory: %s->%s in macro!", #D, #B);        \
+      RAW_LOG(FATAL, err_str);                                            \
       return;                                                             \
     }                                                                     \
     return;                                                               \
@@ -66,15 +72,18 @@ namespace predictor {
     ::baidu::paddle_serving::predictor::FactoryDerive* factory = new (    \
        ::std::nothrow)::baidu::paddle_serving::predictor::FactoryDerive();\
+    char err_str[ERROR_STRING_LEN];                                       \
     if (factory == NULL ||                                                \
         ::baidu::paddle_serving::predictor::FactoryPool::instance()       \
             .register_factory(N, factory) != 0) {                         \
-      RAW_LOG_FATAL(                                                      \
-          "Failed regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                             \
+               "Failed regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
+      RAW_LOG(FATAL, err_str);                                            \
       return;                                                             \
     }                                                                     \
-    RAW_LOG_WARNING(                                                      \
-        "Succ regist factory: %s->%s, tag: %s in macro!", #D, #B, N);     \
+    snprintf(err_str, ERROR_STRING_LEN - 1,                               \
+             "Succ regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
+    RAW_LOG(WARNING, err_str);                                            \
     return;                                                               \
   }
@@ -102,24 +111,29 @@ class FactoryPool {
   }
 
   int register_factory(const std::string& tag, FactoryBase* factory) {
+    char err_str[ERROR_STRING_LEN];
    typename std::map*>::iterator it =
        _pool.find(tag);
     if (it != _pool.end()) {
-      RAW_LOG_FATAL("Insert duplicate with tag: %s", tag.c_str());
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Insert duplicate with tag: %s", tag.c_str());
+      RAW_LOG(FATAL, err_str);
       return -1;
     }
 
     std::pair*>::iterator, bool> r =
        _pool.insert(std::make_pair(tag, factory));
     if (!r.second) {
-      RAW_LOG_FATAL("Failed insert new factory with: %s", tag.c_str());
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Failed insert new factory with: %s", tag.c_str());
+      RAW_LOG(FATAL, err_str);
       return -1;
     }
-
-    RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s",
-                 tag.c_str(),
-                 typeid(B).name());
-
+
+    snprintf(err_str, ERROR_STRING_LEN - 1,
+             "Succ insert one factory, tag: %s, base type %s", tag.c_str(),
+             typeid(B).name());
+    RAW_LOG(INFO, err_str);
     return 0;
   }
@@ -127,9 +141,11 @@ class FactoryPool {
    typename std::map*>::iterator it =
        _pool.find(tag);
     if (it == _pool.end() || it->second == NULL) {
-      RAW_LOG_FATAL("Not found factory pool, tag: %s, pool size %u",
-                    tag.c_str(),
-                    _pool.size());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Not found factory pool, tag: %s, pool size %u", tag.c_str(),
+               _pool.size());
+      RAW_LOG(FATAL, err_str);
       return NULL;
     }
diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h
index 51cfb95a8d56d4261b9dab99df5216c5e6c79733..1cff7647e2dbbcc8df4d144f81488fde35aeb798 100644
--- a/core/predictor/framework/infer.h
+++ b/core/predictor/framework/infer.h
@@ -603,6 +603,7 @@ class VersionedInferEngine : public InferEngine {
       LOG(ERROR) << "Failed generate engine with type:" << engine_type;
       return -1;
     }
+#ifndef BCLOUD
     VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
     int tmp = FLAGS_logtostderr;
     if (engine->proc_initialize(conf, version) != 0) {
@@ -611,6 +612,12 @@ class VersionedInferEngine : public InferEngine {
     }
     VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
     FLAGS_logtostderr = tmp;
+#else
+    if (engine->proc_initialize(conf, version) != 0) {
+      LOG(ERROR) << "Failed initialize engine, type:" << engine_type;
+      return -1;
+    }
+#endif
     auto r = _versions.insert(std::make_pair(engine->version(), engine));
     if (!r.second) {
       LOG(ERROR) << "Failed insert item: " << engine->version()
diff --git a/core/predictor/framework/op_repository.h b/core/predictor/framework/op_repository.h
index d27e68c1dbcd98e7393aac6e8b0f001e7300a2bc..dcd0b13438c9d040336922c03dbbfe2c9dbccf8d 100644
--- a/core/predictor/framework/op_repository.h
+++ b/core/predictor/framework/op_repository.h
@@ -62,7 +62,10 @@ class OpRepository {
   template
   void regist_op(std::string op_type) {
     _repository[op_type] = &OpFactory::instance();
-    RAW_LOG_INFO("Succ regist op: %s", op_type.c_str());
+    char err_str[ERROR_STRING_LEN];
+    snprintf(err_str, ERROR_STRING_LEN - 1, "Succ regist op: %s",
+             op_type.c_str());
+    RAW_LOG(INFO, err_str);
   }
 
   Op* get_op(std::string op_type);
diff --git a/core/predictor/framework/resource.cpp b/core/predictor/framework/resource.cpp
index ca219519e2dcf20bc961d991e3f2eb0ad060f38f..cdb21097fdf40ca6060d99088ed5649a08507720 100644
--- a/core/predictor/framework/resource.cpp
+++ b/core/predictor/framework/resource.cpp
@@ -17,6 +17,9 @@
 #include
 #include "core/predictor/common/inner_common.h"
 #include "core/predictor/framework/kv_manager.h"
+#ifdef BCLOUD
+#include "aipe_sec_client.h"  // NOLINT
+#endif
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
@@ -109,6 +112,42 @@ int Resource::initialize(const std::string& path, const std::string& file) {
   }
   LOG(WARNING) << "Successfully proc initialized mempool wrapper";
 
+#ifdef WITH_AUTH
+  std::string product_name_str = resource_conf.auth_product_name();
+  std::string container_id_str = resource_conf.auth_container_id();
+
+  char* product_name = new char[product_name_str.size() + 1];
+  snprintf(product_name,
+           product_name_str.size() + 1,
+           "%s",
+           product_name_str.c_str());
+  char* container_id = new char[container_id_str.size() + 1];
+  snprintf(container_id,
+           container_id_str.size() + 1,
+           "%s",
+           container_id_str.c_str());
+
+  aipe_auth_request request;
+  request.product_name = product_name;
+  request.container_id = container_id;
+  request.request_ts = (int64_t)time(NULL);
+
+  LOG(INFO) << "\nEasypack info"
+            << "\nproduct name: " << request.product_name
+            << "\ncontainer_id: " << request.container_id
+            << "\nrequest time stamp: " << request.request_ts;
+
+  aipe_auth_response response;
+  response = check_auth(request);
+
+  if (response.result == 0) {
+    LOG(INFO) << "Authentication succeed.";
+  } else {
+    LOG(ERROR) << "Authentication failed. Error code: " << response.result;
+    return -1;
+  }
+#endif
+
   if (FLAGS_enable_model_toolkit) {
     int err = 0;
     std::string model_toolkit_path = resource_conf.model_toolkit_path();
diff --git a/core/predictor/framework/service_manager.h b/core/predictor/framework/service_manager.h
index fa5e872625739ce233d7dd5efe11e1a0fa61d49d..a12f13cd67ffde9cad810c7755900f224cd0c07f 100644
--- a/core/predictor/framework/service_manager.h
+++ b/core/predictor/framework/service_manager.h
@@ -21,20 +21,23 @@
 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
 
-#define REGIST_FORMAT_SERVICE(svr_name, svr)                                  \
-  do {                                                                        \
-    int ret =                                                                 \
-        ::baidu::paddle_serving::predictor::FormatServiceManager::instance()  \
-            .regist_service(svr_name, svr);                                   \
-    if (ret != 0) {                                                           \
-      RAW_LOG_ERROR("Failed regist service[%s][%s]",                          \
-                    svr_name.c_str(),                                         \
-                    typeid(svr).name());                                      \
-    } else {                                                                  \
-      RAW_LOG_INFO("Success regist service[%s][%s]",                          \
-                   svr_name.c_str(),                                          \
-                   typeid(svr).name());                                       \
-    }                                                                         \
+#define REGIST_FORMAT_SERVICE(svr_name, svr)                                  \
+  do {                                                                        \
+    char err_str[ERROR_STRING_LEN];                                           \
+    int ret =                                                                 \
+        ::baidu::paddle_serving::predictor::FormatServiceManager::instance()  \
+            .regist_service(svr_name, svr);                                   \
+    if (ret != 0) {                                                           \
+      snprintf(err_str, ERROR_STRING_LEN - 1, "Failed regist service[%s][%s]", \
+               svr_name.c_str(),                                              \
+               typeid(svr).name());                                           \
+      RAW_LOG(ERROR, err_str);                                                \
+    } else {                                                                  \
+      snprintf(err_str, ERROR_STRING_LEN - 1, "Success regist service[%s][%s]", \
+               svr_name.c_str(),                                              \
+               typeid(svr).name());                                           \
+      RAW_LOG(INFO, err_str);                                                 \
+    }                                                                         \
   } while (0)
@@ -42,31 +45,38 @@ class FormatServiceManager {
   typedef google::protobuf::Service Service;
 
   int regist_service(const std::string& svr_name, Service* svr) {
+    char err_str[ERROR_STRING_LEN];
     if (_service_map.find(svr_name) != _service_map.end()) {
-      RAW_LOG_ERROR("Service[%s][%s] already exist!",
-                    svr_name.c_str(),
-                    typeid(svr).name());
+      snprintf(err_str, ERROR_STRING_LEN - 1, "Service[%s][%s] already exist!",
+               svr_name.c_str(),
+               typeid(svr).name());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }
 
     std::pair::iterator, bool> ret;
     ret = _service_map.insert(std::make_pair(svr_name, svr));
     if (ret.second == false) {
-      RAW_LOG_ERROR("Service[%s][%s] insert failed!",
-                    svr_name.c_str(),
-                    typeid(svr).name());
+      snprintf(err_str, ERROR_STRING_LEN - 1, "Service[%s][%s] insert failed!",
+               svr_name.c_str(),
+               typeid(svr).name());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }
 
-    RAW_LOG_INFO("Service[%s] insert successfully!", svr_name.c_str());
+    snprintf(err_str, ERROR_STRING_LEN - 1, "Service[%s] insert successfully!",
+             svr_name.c_str());
+    RAW_LOG(INFO, err_str);
     return 0;
   }
 
   Service* get_service(const std::string& svr_name) {
+    char err_str[ERROR_STRING_LEN];
     boost::unordered_map::iterator res;
     if ((res = _service_map.find(svr_name)) == _service_map.end()) {
-      RAW_LOG_WARNING("Service[%s] not found in service manager!",
-                      svr_name.c_str());
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Service[%s] not found in service manager!", svr_name.c_str());
+      RAW_LOG(WARNING, err_str);
       return NULL;
     }
     return (*res).second;
diff --git a/core/predictor/src/pdserving.cpp b/core/predictor/src/pdserving.cpp
index 157d52cee1adaea0524ebde01f75a90a6b2adc2f..59ec59d9012c94c322eee2ab3f357218deeedbb4 100644
--- a/core/predictor/src/pdserving.cpp
+++ b/core/predictor/src/pdserving.cpp
@@ -202,8 +202,6 @@ int main(int argc, char** argv) {
   }
   VLOG(2) << "Succ call pthread worker start function";
 
-#ifndef BCLOUD
-
   if (Resource::instance().general_model_initialize(FLAGS_resource_path,
                                                     FLAGS_resource_file) != 0) {
     LOG(ERROR) << "Failed to initialize general model conf: "
@@ -213,6 +211,7 @@ int main(int argc, char** argv) {
 
   VLOG(2) << "Succ initialize general model";
 
+#ifndef BCLOUD
   // FATAL messages are output to stderr
   FLAGS_stderrthreshold = 3;
 #endif
diff --git a/core/sdk-cpp/include/abtest.h b/core/sdk-cpp/include/abtest.h
index 4833325416cfd6418bf33444001917d887f08cc0..53be4dffece35b2cd5886fd94ab60da58db464bd 100644
--- a/core/sdk-cpp/include/abtest.h
+++ b/core/sdk-cpp/include/abtest.h
@@ -50,9 +50,9 @@ class WeightedRandomRender : public EndpointRouterBase {
     Factory* factory =
        new (std::nothrow) Factory();
 
     if (factory == NULL) {
-      RAW_LOG_ERROR(
-          "Failed regist factory: WeightedRandomRender->EndpointRouterBase in "
-          "macro!");
+      RAW_LOG(ERROR,
+              "Failed regist factory: WeightedRandomRender->EndpointRouterBase \
+              in macro!");
       return -1;
     }
@@ -62,9 +62,8 @@ class WeightedRandomRender : public EndpointRouterBase {
     // together.
     if (FactoryPool::instance().register_factory(
             "WeightedRandomRender", factory) != 0) {
-      RAW_LOG_INFO(
-          "Factory has been registed: "
-          "WeightedRandomRender->EndpointRouterBase.");
+      RAW_LOG(INFO, "Factory has been registed: \
+              WeightedRandomRender->EndpointRouterBase.");
     }
 
     return 0;
diff --git a/core/sdk-cpp/include/factory.h b/core/sdk-cpp/include/factory.h
index 4a3d988afcd981dd92eca5f65c3f254d5f2255d5..86935b936d1a81c130501b386656e8eb5a4247b7 100644
--- a/core/sdk-cpp/include/factory.h
+++ b/core/sdk-cpp/include/factory.h
@@ -18,7 +18,6 @@
 #include
 #include "core/sdk-cpp/include/common.h"
 #include "core/sdk-cpp/include/stub_impl.h"
-#include "glog/raw_logging.h"
 
 namespace baidu {
 namespace paddle_serving {
@@ -28,25 +27,33 @@ namespace sdk_cpp {
 namespace brpc = baidu::rpc;
 #endif
 
-#define INLINE_REGIST_OBJECT(D, B, E)                                     \
-  do {                                                                    \
-    Factory* factory = new (std::nothrow) Factory();                      \
-    if (factory == NULL ||                                                \
-        FactoryPool::instance().register_factory(#D, factory) != 0) {     \
-      RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B);   \
-      return E;                                                           \
-    }                                                                     \
+#define ERROR_STRING_LEN 10240
+
+#define INLINE_REGIST_OBJECT(D, B, E)                                     \
+  do {                                                                    \
+    Factory* factory = new (std::nothrow) Factory();                      \
+    if (factory == NULL ||                                                \
+        FactoryPool::instance().register_factory(#D, factory) != 0) {     \
+      char err_str[ERROR_STRING_LEN];                                     \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                             \
+               "Failed regist factory: %s->%s in macro!", #D, #B);        \
+      RAW_LOG(ERROR, err_str);                                            \
+      return E;                                                           \
+    }                                                                     \
   } while (0)
 
-#define DECLARE_FACTORY_OBJECT(D, B)                                      \
-  static int regist(const std::string& tag) {                             \
-    Factory* factory = new (std::nothrow) Factory();                      \
-    if (factory == NULL ||                                                \
-        FactoryPool::instance().register_factory(tag, factory) != 0) {    \
-      RAW_LOG_ERROR("Failed regist factory: %s in macro!", #D);           \
-      return -1;                                                          \
-    }                                                                     \
-    return 0;                                                             \
+#define DECLARE_FACTORY_OBJECT(D, B)                                      \
+  static int regist(const std::string& tag) {                             \
+    Factory* factory = new (std::nothrow) Factory();                      \
+    if (factory == NULL ||                                                \
+        FactoryPool::instance().register_factory(tag, factory) != 0) {    \
+      char err_str[ERROR_STRING_LEN];                                     \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                             \
+               "Failed regist factory: %s in macro!", #D);                \
+      RAW_LOG(ERROR, err_str);                                            \
+      return -1;                                                          \
+    }                                                                     \
+    return 0;                                                             \
   }
 
 #define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
@@ -58,34 +65,39 @@ namespace brpc = baidu::rpc;
     D::regist(#D);                                                           \
   }
 
-#define REGIST_FACTORY_OBJECT_IMPL(D, B)                                     \
-  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
-                                                       __LINE__)(void) {     \
-    ::baidu::paddle_serving::sdk_cpp::Factory* factory =                     \
-        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory();     \
-    if (factory == NULL ||                                                   \
-        ::baidu::paddle_serving::sdk_cpp::FactoryPool::instance()            \
-            .register_factory(#D, factory) != 0) {                           \
-      RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B);      \
-      return;                                                                \
-    }                                                                        \
-    return;                                                                  \
+#define REGIST_FACTORY_OBJECT_IMPL(D, B)                                     \
+  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
+                                                       __LINE__)(void) {     \
+    ::baidu::paddle_serving::sdk_cpp::Factory* factory =                     \
+        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory();     \
+    if (factory == NULL ||                                                   \
+        ::baidu::paddle_serving::sdk_cpp::FactoryPool::instance()            \
+            .register_factory(#D, factory) != 0) {                           \
+      char err_str[ERROR_STRING_LEN];                                        \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                                \
+               "Failed regist factory: %s->%s in macro!", #D, #B);           \
+      RAW_LOG(ERROR, err_str);                                               \
+      return;                                                                \
+    }                                                                        \
+    return;                                                                  \
   }
 
-#define REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(D, B, T)                         \
-  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
-                                                       __LINE__)(void) {     \
-    ::baidu::paddle_serving::sdk_cpp::Factory* factory =                     \
-        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory();     \
-    if (factory == NULL ||                                                   \
-        ::baidu::paddle_serving::sdk_cpp::FactoryPool::instance()            \
-            .register_factory(T, factory) != 0) {                            \
-      RAW_LOG_ERROR(                                                         \
-          "Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T);     \
-      return;                                                                \
-    }                                                                        \
-    return;                                                                  \
-  }
+#define REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(D, B, T)                         \
+  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
+                                                       __LINE__)(void) {     \
+    ::baidu::paddle_serving::sdk_cpp::Factory* factory =                     \
+        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory();     \
+    if (factory == NULL ||                                                   \
+        ::baidu::paddle_serving::sdk_cpp::FactoryPool::instance()            \
+            .register_factory(T, factory) != 0) {                            \
+      char err_str[ERROR_STRING_LEN];                                        \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                                \
+               "Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T); \
+      RAW_LOG(ERROR, err_str);                                               \
+      return;                                                                \
+    }                                                                        \
+    return;                                                                  \
+  }
 
 #define REGIST_ABTEST_OBJECT(D)                            \
   REGIST_FACTORY_OBJECT_IMPL(                               \
@@ -95,24 +107,26 @@ namespace brpc = baidu::rpc;
   REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(                      \
       D, ::baidu::paddle_serving::sdk_cpp::ABTestRouterBase, T)
 
-#define REGIST_STUB_OBJECT_WITH_TAG(D, C, R, I, O, T)                        \
-  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
-                                                       __LINE__)(void) {     \
-    ::baidu::paddle_serving::sdk_cpp::Factory<                               \
-        ::baidu::paddle_serving::sdk_cpp::StubImpl,                          \
-        ::baidu::paddle_serving::sdk_cpp::Stub>* factory =                   \
-        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory<       \
-            ::baidu::paddle_serving::sdk_cpp::StubImpl,                      \
-            ::baidu::paddle_serving::sdk_cpp::Stub>();                       \
-    if (factory == NULL ||                                                   \
-        ::baidu::paddle_serving::sdk_cpp::FactoryPool<                       \
-            ::baidu::paddle_serving::sdk_cpp::Stub>::instance()              \
-            .register_factory(T, factory) != 0) {                            \
-      RAW_LOG_ERROR(                                                         \
-          "Failed regist factory: %s->Stub, tag: %s in macro!", #D, T);      \
-      return;                                                                \
-    }                                                                        \
-    return;                                                                  \
+#define REGIST_STUB_OBJECT_WITH_TAG(D, C, R, I, O, T)                        \
+  __attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject,   \
+                                                       __LINE__)(void) {     \
+    ::baidu::paddle_serving::sdk_cpp::Factory<                               \
+        ::baidu::paddle_serving::sdk_cpp::StubImpl,                          \
+        ::baidu::paddle_serving::sdk_cpp::Stub>* factory =                   \
+        new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory<       \
+            ::baidu::paddle_serving::sdk_cpp::StubImpl,                      \
+            ::baidu::paddle_serving::sdk_cpp::Stub>();                       \
+    if (factory == NULL ||                                                   \
+        ::baidu::paddle_serving::sdk_cpp::FactoryPool<                       \
+            ::baidu::paddle_serving::sdk_cpp::Stub>::instance()              \
+            .register_factory(T, factory) != 0) {                            \
+      char err_str[ERROR_STRING_LEN];                                        \
+      snprintf(err_str, ERROR_STRING_LEN - 1,                                \
+               "Failed regist factory: %s->Stub, tag: %s in macro!", #D, T); \
+      RAW_LOG(ERROR, err_str);                                               \
+      return;                                                                \
+    }                                                                        \
+    return;                                                                  \
   }
 
 class Stub;
@@ -146,14 +160,20 @@ class FactoryPool {
    typename std::map*>::iterator it =
        _pool.find(tag);
     if (it != _pool.end()) {
-      RAW_LOG_ERROR("Insert duplicate with tag: %s", tag.c_str());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Insert duplicate with tag: %s", tag.c_str());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }
 
     std::pair*>::iterator, bool> r =
        _pool.insert(std::make_pair(tag, factory));
     if (!r.second) {
-      RAW_LOG_ERROR("Failed insert new factory with: %s", tag.c_str());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Failed insert new factory with: %s", tag.c_str());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }
@@ -164,9 +184,11 @@ class FactoryPool {
    typename std::map*>::iterator it =
        _pool.find(tag);
     if (it == _pool.end() || it->second == NULL) {
-      RAW_LOG_ERROR("Not found factory pool, tag: %s, pool size: %u",
-                    tag.c_str(),
-                    _pool.size());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str, ERROR_STRING_LEN - 1,
+               "Not found factory pool, tag: %s, pool size: %u",
+               tag.c_str(), _pool.size());
+      RAW_LOG(ERROR, err_str);
       return NULL;
     }
diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index a4d8dda71a7977185106bb1552cb8f39ef6bc50e..f65711e04cf601e40f693b045adbaba0cf7ada71 100644
--- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -23,6 +23,7 @@
 #include "core/configure/inferencer_configure.pb.h"
 #include "core/predictor/framework/infer.h"
 #include "paddle_inference_api.h"  // NOLINT
+//#include "predictor/framework/infer.h"
 
 namespace baidu {
 namespace paddle_serving {
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 678c0583d1e132791a1199e315ea380a4ae3108b..33d176b10dcb01188ebaa9bf1e6abd7abce08493 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -157,6 +157,8 @@ class Server(object):
         self.cur_path = os.getcwd()
         self.use_local_bin = False
         self.mkl_flag = False
+        self.product_name = None
+        self.container_id = None
         self.model_config_paths = None  # for multi-model in a workflow
 
     def set_max_concurrency(self, concurrency):
@@ -191,6 +193,16 @@
     def set_ir_optimize(self, flag=False):
         self.ir_optimization = flag
 
+    def set_product_name(self, product_name=None):
+        if product_name == None:
+            raise ValueError("product_name can't be None.")
+        self.product_name = product_name
+
+    def set_container_id(self, container_id):
+        if container_id == None:
+            raise ValueError("container_id can't be None.")
+        self.container_id = container_id
+
     def check_local_bin(self):
         if "SERVING_BIN" in os.environ:
             self.use_local_bin = True
@@ -254,6 +266,10 @@ class Server(object):
         self.resource_conf.model_toolkit_file = self.model_toolkit_fn
         self.resource_conf.general_model_path = workdir
         self.resource_conf.general_model_file = self.general_model_config_fn
+        if self.product_name != None:
+            self.resource_conf.auth_product_name = self.product_name
+        if self.container_id != None:
+            self.resource_conf.auth_container_id = self.container_id
 
     def _write_pb_str(self, filepath, pb_obj):
         with open(filepath, "w") as fout:
@@ -540,7 +556,6 @@ class MultiLangServerServiceServicer(multi_lang_general_model_service_pb2_grpc.
             results, tag = ret
             resp.tag = tag
             resp.err_code = 0
-
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
         for model_name, model_result in results.items():
@@ -560,7 +575,7 @@ class MultiLangServerServiceServicer(multi_lang_general_model_service_pb2_grpc.
                         .tolist())
                 elif v_type == 2:  # int32
                     tensor.int_data.extend(model_result[name].reshape(-1)
-                        .tolist())
+                                           .tolist())
                 else:
                     raise Exception("error type.")
                 tensor.shape.extend(list(model_result[name].shape))
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 704cf0304adf1ac647c244063c2b23049f92b221..d282ac076e377806e9a3b320b880ffed6300b971 100644
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -58,6 +58,16 @@ def parse_args():  # pylint: disable=doc-string-missing
         default=False,
         action="store_true",
         help="Use Multi-language-service")
+    parser.add_argument(
+        "--product_name",
+        type=str,
+        default=None,
+        help="product_name for authentication")
+    parser.add_argument(
+        "--container_id",
+        type=str,
+        default=None,
+        help="container_id for authentication")
     return parser.parse_args()
 
 
@@ -101,6 +111,10 @@ def start_standard_model():  # pylint: disable=doc-string-missing
     server.use_mkl(use_mkl)
     server.set_max_body_size(max_body_size)
     server.set_port(port)
+    if args.product_name != None:
+        server.set_product_name(args.product_name)
+    if args.container_id != None:
+        server.set_container_id(args.container_id)
 
     server.load_model_config(model)
     server.prepare_server(workdir=workdir, port=port, device=device)
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index becfbb20090799aaf40d79973964e497cf599436..3b4261eef2e8ab366c43b2c375cb1b5129457245 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -73,6 +73,16 @@ def serve_args():
         default=False,
         action="store_true",
         help="Use Multi-language-service")
+    parser.add_argument(
+        "--product_name",
+        type=str,
+        default=None,
+        help="product_name for authentication")
+    parser.add_argument(
+        "--container_id",
+        type=str,
+        default=None,
+        help="container_id for authentication")
     return parser.parse_args()
 
 
@@ -196,6 +206,8 @@ class Server(object):
         self.use_local_bin = False
         self.gpuid = 0
         self.model_config_paths = None  # for multi-model in a workflow
+        self.product_name = None
+        self.container_id = None
 
     def set_max_concurrency(self, concurrency):
         self.max_concurrency = concurrency
@@ -229,6 +241,16 @@ class Server(object):
     def set_ir_optimize(self, flag=False):
         self.ir_optimization = flag
 
+    def set_product_name(self, product_name=None):
+        if product_name == None:
+            raise ValueError("product_name can't be None.")
+        self.product_name = product_name
+
+    def set_container_id(self, container_id):
+        if container_id == None:
+            raise ValueError("container_id can't be None.")
+        self.container_id = container_id
+
     def check_local_bin(self):
         if "SERVING_BIN" in os.environ:
             self.use_local_bin = True
@@ -302,6 +324,10 @@ class Server(object):
         self.resource_conf.model_toolkit_file = self.model_toolkit_fn
         self.resource_conf.general_model_path = workdir
         self.resource_conf.general_model_file = self.general_model_config_fn
+        if self.product_name != None:
+            self.resource_conf.auth_product_name = self.product_name
+        if self.container_id != None:
+            self.resource_conf.auth_container_id = self.container_id
 
     def _write_pb_str(self, filepath, pb_obj):
         with open(filepath, "w") as fout:
diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py
index 3b0941a97560f11a52808fc7e152419e2cec0ba0..4af0ec8f32db0836d1ff64e3d0621060d474deda 100644
--- a/python/paddle_serving_server_gpu/serve.py
+++ b/python/paddle_serving_server_gpu/serve.py
@@ -34,7 +34,7 @@ def start_gpu_card_model(index, gpuid, args):  # pylint: disable=doc-string-miss
     port = args.port + index
     thread_num = args.thread
     model = args.model
-    mem_optim = args.mem_optim_off is False
+    mem_optim = args.mem_optim_off
     ir_optim = args.ir_optim
     max_body_size = args.max_body_size
     use_multilang = args.use_multilang
@@ -65,6 +65,11 @@ def start_gpu_card_model(index, gpuid, args):  # pylint: disable=doc-string-miss
     server.set_ir_optimize(ir_optim)
     server.set_max_body_size(max_body_size)
 
+    if args.product_name != None:
+        server.set_product_name(args.product_name)
+    if args.container_id != None:
+        server.set_container_id(args.container_id)
+
     server.load_model_config(model)
     server.prepare_server(workdir=workdir, port=port, device=device)
     if gpuid >= 0:
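
Usage note (a minimal sketch, not part of the patch): the hunks above thread two new authentication fields end to end -- ResourceConf gains auth_product_name/auth_container_id, Resource::initialize() passes them to check_auth() in a WITH_AUTH build, and the Python Server class and serve.py expose them. The model directory and auth values below are hypothetical placeholders, and run_server() is the pre-existing launch method not shown in this diff:

    # Sketch: configure the new auth fields on a CPU server.
    from paddle_serving_server import Server

    server = Server()
    server.set_product_name("my_product")      # stored in ResourceConf.auth_product_name
    server.set_container_id("container-0001")  # stored in ResourceConf.auth_container_id
    server.load_model_config("uci_housing_model")
    server.prepare_server(workdir="workdir", port=9292, device="cpu")
    server.run_server()

Equivalently, with the new command-line flags added to serve.py:

    python -m paddle_serving_server.serve --model uci_housing_model --port 9292 \
        --product_name my_product --container_id container-0001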