Unverified · commit c4310a4b, authored by barriery, committed by GitHub

Merge branch 'develop' into add-nvidia-wiki-to-docker-images-doc

@@ -14,6 +14,8 @@
 syntax = "proto2";
+package baidu.paddle_serving.multi_lang;
 option java_multiple_files = true;
 option java_package = "io.paddle.serving.grpc";
 option java_outer_classname = "ServingProto";
......
@@ -58,6 +58,8 @@ message ResourceConf {
   optional string cube_config_path = 5;
   optional string cube_config_file = 6;
   optional int32 cube_quant_bits = 7;  // set 0 if no quant.
+  optional string auth_product_name = 8;
+  optional string auth_container_id = 9;
 };
 // DAG node depency info
......
@@ -22,7 +22,8 @@
 #ifdef BCLOUD
 #include "baidu/rpc/channel.h"
 #include "baidu/rpc/parallel_channel.h"
-#include "rapidjson/document.h"
+#include "rapidjson_1.0/document.h"
+#include "rapidjson_1.0/rapidjson.h"
 #else
 #include "brpc/channel.h"
 #include "brpc/parallel_channel.h"
......
@@ -39,7 +39,9 @@ using configure::GeneralModelConfig;
 void PredictorClient::init_gflags(std::vector<std::string> argv) {
   std::call_once(gflags_init_flag, [&]() {
+#ifndef BCLOUD
     FLAGS_logtostderr = true;
+#endif
     argv.insert(argv.begin(), "dummy");
     int argc = argv.size();
     char **arr = new char *[argv.size()];
......
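For reference: `FLAGS_logtostderr` is glog's standard flag for routing all log output to stderr instead of per-severity log files, and the new `#ifndef BCLOUD` guard leaves the internal build's logging defaults untouched (that rationale is inferred from the guard, not stated in the commit). A minimal sketch of the flag's effect, assuming glog and gflags are installed:

```cpp
#include <gflags/gflags.h>
#include <glog/logging.h>

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  // As in PredictorClient::init_gflags outside BCLOUD: send all glog
  // output to stderr rather than to log files.
  FLAGS_logtostderr = true;
  google::InitGoogleLogging(argv[0]);
  LOG(INFO) << "written to stderr because FLAGS_logtostderr is true";
  return 0;
}
```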
@@ -13,20 +13,12 @@
 // limitations under the License.

 #pragma once
-#include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include <string>
+#include <vector>
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -15,17 +15,9 @@
 #pragma once
 #include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -15,17 +15,9 @@
 #pragma once
 #include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -15,17 +15,9 @@
 #pragma once
 #include <string.h>
-#include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
 #include "paddle_inference_api.h"  // NOLINT
-#endif
+#include <string>

 namespace baidu {
 namespace paddle_serving {
......
@@ -60,10 +60,11 @@ int GeneralInferOp::inference() {
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = &output_blob->tensor_vector;
-  int batch_size = input_blob->GetBatchSize();
+  int batch_size = input_blob->_batch_size;
   VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
-  output_blob->SetBatchSize(batch_size);
+  output_blob->_batch_size = batch_size;
   VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
......
@@ -15,17 +15,9 @@
 #pragma once
 #include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -102,6 +102,7 @@ int GeneralReaderOp::inference() {
       baidu::paddle_serving::predictor::Resource::instance();
   VLOG(2) << "(logid=" << log_id << ") get resource pointer done.";
+
   std::shared_ptr<PaddleGeneralModelConfig> model_config =
       resource.get_general_model_config();
@@ -265,6 +266,7 @@ int GeneralReaderOp::inference() {
   timeline.Pause();
   int64_t end = timeline.TimeStampUS();
   res->p_size = 0;
+  res->_batch_size = batch_size;
   AddBlobInfo(res, start);
   AddBlobInfo(res, end);
......
@@ -13,21 +13,13 @@
 // limitations under the License.

 #pragma once
-#include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include <string>
+#include <vector>
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -15,16 +15,8 @@
 #pragma once
 #include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -13,21 +13,13 @@
 // limitations under the License.

 #pragma once
-#include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include <string>
+#include <vector>
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/resource.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -15,17 +15,9 @@
 #pragma once
 #include <string>
 #include <vector>
-#ifdef BCLOUD
-#ifdef WITH_GPU
-#include "paddle/paddle_inference_api.h"
-#else
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#endif
-#else
-#include "paddle_inference_api.h"  // NOLINT
-#endif
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/op/general_infer_helper.h"
+#include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
......
@@ -50,7 +50,7 @@
 #include "butil/time.h"
 #endif
-#include "glog/raw_logging.h"
+#define ERROR_STRING_LEN 10240
 #include "core/configure/general_model_config.pb.h"
 #include "core/configure/include/configure_parser.h"
......
@@ -17,7 +17,7 @@
 #include <string>
 #include <utility>
 #include "core/predictor/common/inner_common.h"
-#include "glog/raw_logging.h"

 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
@@ -28,7 +28,12 @@ namespace predictor {
     FactoryDerive<D, B>* factory = new (std::nothrow) FactoryDerive<D, B>(); \
     if (factory == NULL || \
         FactoryPool<B>::instance().register_factory(tag, factory) != 0) { \
-      RAW_LOG_FATAL("Failed regist factory: %s in macro!", #D); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s in macro!", \
+               #D); \
+      RAW_LOG(FATAL, err_str); \
       return -1; \
     } \
     return 0; \
@@ -54,7 +59,13 @@ namespace predictor {
     if (factory == NULL || \
         ::baidu::paddle_serving::predictor::FactoryPool<B>::instance() \
             .register_factory(#D, factory) != 0) { \
-      RAW_LOG_FATAL("Failed regist factory: %s->%s in macro!", #D, #B); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s->%s in macro!", \
+               #D, \
+               #B); \
+      RAW_LOG(FATAL, err_str); \
       return; \
     } \
     return; \
@@ -66,15 +77,26 @@ namespace predictor {
   ::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory = new ( \
       ::std::nothrow)::baidu::paddle_serving::predictor::FactoryDerive<D, \
                                                                        B>(); \
+  char err_str[ERROR_STRING_LEN]; \
   if (factory == NULL || \
       ::baidu::paddle_serving::predictor::FactoryPool<B>::instance() \
           .register_factory(N, factory) != 0) { \
-    RAW_LOG_FATAL( \
-        "Failed regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
+    snprintf(err_str, \
+             ERROR_STRING_LEN - 1, \
+             "Failed regist factory: %s->%s, tag: %s in macro!", \
+             #D, \
+             #B, \
+             N); \
+    RAW_LOG(FATAL, err_str); \
     return; \
   } \
-  RAW_LOG_WARNING( \
-      "Succ regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
+  snprintf(err_str, \
+           ERROR_STRING_LEN - 1, \
+           "Succ regist factory: %s->%s, tag: %s in macro!", \
+           #D, \
+           #B, \
+           N); \
+  RAW_LOG(WARNING, err_str); \
   return; \
 }
@@ -102,24 +124,35 @@ class FactoryPool {
   }

   int register_factory(const std::string& tag, FactoryBase<B>* factory) {
+    char err_str[ERROR_STRING_LEN];
     typename std::map<std::string, FactoryBase<B>*>::iterator it =
         _pool.find(tag);
     if (it != _pool.end()) {
-      RAW_LOG_FATAL("Insert duplicate with tag: %s", tag.c_str());
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Insert duplicate with tag: %s",
+               tag.c_str());
+      RAW_LOG(FATAL, err_str);
       return -1;
     }
     std::pair<typename std::map<std::string, FactoryBase<B>*>::iterator, bool>
         r = _pool.insert(std::make_pair(tag, factory));
     if (!r.second) {
-      RAW_LOG_FATAL("Failed insert new factory with: %s", tag.c_str());
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Failed insert new factory with: %s",
+               tag.c_str());
+      RAW_LOG(FATAL, err_str);
       return -1;
     }
-    RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s",
-                 tag.c_str(),
-                 typeid(B).name());
+    snprintf(err_str,
+             ERROR_STRING_LEN - 1,
+             "Succ insert one factory, tag: %s, base type %s",
+             tag.c_str(),
+             typeid(B).name());
+    RAW_LOG(INFO, err_str);
     return 0;
   }
@@ -127,9 +160,13 @@ class FactoryPool {
     typename std::map<std::string, FactoryBase<B>*>::iterator it =
         _pool.find(tag);
     if (it == _pool.end() || it->second == NULL) {
-      RAW_LOG_FATAL("Not found factory pool, tag: %s, pool size %u",
-                    tag.c_str(),
-                    _pool.size());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Not found factory pool, tag: %s, pool size %u",
+               tag.c_str(),
+               _pool.size());
+      RAW_LOG(FATAL, err_str);
       return NULL;
     }
......
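The recurring change in these headers swaps glog's `RAW_LOG_FATAL`/`RAW_LOG_WARNING` convenience macros for an explicit `snprintf` into a fixed-size stack buffer followed by `RAW_LOG`. A minimal standalone sketch of that pattern; `RAW_LOG` here is a toy stand-in for glog's macro from `raw_logging.h`, and `ERROR_STRING_LEN` mirrors the value this commit defines:

```cpp
#include <cstdio>
#include <string>

// Stand-ins so the sketch compiles on its own: the real RAW_LOG comes from
// glog's raw_logging.h, and ERROR_STRING_LEN is defined in this commit.
#define ERROR_STRING_LEN 10240
#define RAW_LOG(severity, msg) std::fprintf(stderr, "[" #severity "] %s\n", msg)

// The pattern used throughout the commit: format the message into a fixed
// stack buffer first, then hand the finished string to RAW_LOG.
int register_factory_demo(const std::string& tag, bool fail) {
  if (fail) {
    char err_str[ERROR_STRING_LEN];
    std::snprintf(err_str,
                  ERROR_STRING_LEN - 1,  // same headroom as in the diff
                  "Failed regist factory: %s in macro!",
                  tag.c_str());
    RAW_LOG(FATAL, err_str);
    return -1;
  }
  return 0;
}

int main() {
  return register_factory_demo("WeightedRandomRender", true) == -1 ? 0 : 1;
}
```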
@@ -603,6 +603,7 @@ class VersionedInferEngine : public InferEngine {
       LOG(ERROR) << "Failed generate engine with type:" << engine_type;
       return -1;
     }
+#ifndef BCLOUD
     VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
     int tmp = FLAGS_logtostderr;
     if (engine->proc_initialize(conf, version) != 0) {
@@ -611,6 +612,12 @@ class VersionedInferEngine : public InferEngine {
     }
     VLOG(2) << "FLAGS_logtostderr " << FLAGS_logtostderr;
     FLAGS_logtostderr = tmp;
+#else
+    if (engine->proc_initialize(conf, version) != 0) {
+      LOG(ERROR) << "Failed initialize engine, type:" << engine_type;
+      return -1;
+    }
+#endif
     auto r = _versions.insert(std::make_pair(engine->version(), engine));
     if (!r.second) {
       LOG(ERROR) << "Failed insert item: " << engine->version()
......
@@ -62,7 +62,10 @@ class OpRepository {
   template <typename OP_TYPE>
   void regist_op(std::string op_type) {
     _repository[op_type] = &OpFactory<OP_TYPE>::instance();
-    RAW_LOG_INFO("Succ regist op: %s", op_type.c_str());
+    char err_str[ERROR_STRING_LEN];
+    snprintf(
+        err_str, ERROR_STRING_LEN - 1, "Succ regist op: %s", op_type.c_str());
+    RAW_LOG(INFO, err_str);
   }

   Op* get_op(std::string op_type);
......
@@ -17,6 +17,9 @@
 #include <string>
 #include "core/predictor/common/inner_common.h"
 #include "core/predictor/framework/kv_manager.h"
+#ifdef BCLOUD
+#include "aipe_sec_client.h"  // NOLINT
+#endif

 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
@@ -109,6 +112,42 @@ int Resource::initialize(const std::string& path, const std::string& file) {
   }
   LOG(WARNING) << "Successfully proc initialized mempool wrapper";
+
+#ifdef WITH_AUTH
+  std::string product_name_str = resource_conf.auth_product_name();
+  std::string container_id_str = resource_conf.auth_container_id();
+
+  char* product_name = new char[product_name_str.size() + 1];
+  snprintf(product_name,
+           product_name_str.size() + 1,
+           "%s",
+           product_name_str.c_str());
+
+  char* container_id = new char[container_id_str.size() + 1];
+  snprintf(container_id,
+           container_id_str.size() + 1,
+           "%s",
+           container_id_str.c_str());
+
+  aipe_auth_request request;
+  request.product_name = product_name;
+  request.container_id = container_id;
+  request.request_ts = (int64_t)time(NULL);
+
+  LOG(INFO) << "\nEasypack info"
+            << "\nproduct name: " << request.product_name
+            << "\ncontainer_id: " << request.container_id
+            << "\nrequest time stamp: " << request.request_ts;
+
+  aipe_auth_response response;
+  response = check_auth(request);
+
+  if (response.result == 0) {
+    LOG(INFO) << "Authentication succeed.";
+  } else {
+    LOG(ERROR) << "Authentication failed. Error code: " << response.result;
+    return -1;
+  }
+#endif
+
   if (FLAGS_enable_model_toolkit) {
     int err = 0;
     std::string model_toolkit_path = resource_conf.model_toolkit_path();
......
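The new `WITH_AUTH` block authenticates the serving process through `aipe_sec_client.h`, which is internal to the BCLOUD build. The sketch below mirrors the control flow with hypothetical stand-ins for `aipe_auth_request`, `aipe_auth_response`, and `check_auth`; the commit itself additionally copies the conf strings into heap-allocated buffers before filling the request:

```cpp
#include <cstdint>
#include <ctime>
#include <iostream>
#include <string>

// Hypothetical stand-ins for the internal aipe_sec_client.h interface;
// only the fields this commit touches are modeled.
struct aipe_auth_request {
  const char* product_name;
  const char* container_id;
  int64_t request_ts;
};
struct aipe_auth_response {
  int result;  // 0 means the credentials were accepted
};
aipe_auth_response check_auth(const aipe_auth_request& req) {
  // A real implementation would call the authentication service.
  return {(req.product_name != nullptr && req.container_id != nullptr) ? 0 : -1};
}

// Mirrors the WITH_AUTH block in Resource::initialize: build a request from
// the ResourceConf strings and abort startup when the check is rejected.
int authenticate(const std::string& product, const std::string& container) {
  aipe_auth_request request;
  request.product_name = product.c_str();
  request.container_id = container.c_str();
  request.request_ts = static_cast<int64_t>(time(NULL));
  aipe_auth_response response = check_auth(request);
  if (response.result != 0) {
    std::cerr << "Authentication failed. Error code: " << response.result << "\n";
    return -1;
  }
  std::cout << "Authentication succeed." << std::endl;
  return 0;
}

int main() { return authenticate("demo_product", "demo_container"); }
```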
@@ -23,17 +23,24 @@ namespace predictor {
 #define REGIST_FORMAT_SERVICE(svr_name, svr) \
   do { \
+    char err_str[ERROR_STRING_LEN]; \
     int ret = \
         ::baidu::paddle_serving::predictor::FormatServiceManager::instance() \
             .regist_service(svr_name, svr); \
     if (ret != 0) { \
-      RAW_LOG_ERROR("Failed regist service[%s][%s]", \
-                    svr_name.c_str(), \
-                    typeid(svr).name()); \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist service[%s][%s]", \
+               svr_name.c_str(), \
+               typeid(svr).name()); \
+      RAW_LOG(ERROR, err_str); \
     } else { \
-      RAW_LOG_INFO("Success regist service[%s][%s]", \
-                   svr_name.c_str(), \
-                   typeid(svr).name()); \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Success regist service[%s][%s]", \
+               svr_name.c_str(), \
+               typeid(svr).name()); \
+      RAW_LOG(INFO, err_str); \
     } \
   } while (0)
@@ -42,31 +49,46 @@ class FormatServiceManager {
   typedef google::protobuf::Service Service;

   int regist_service(const std::string& svr_name, Service* svr) {
+    char err_str[ERROR_STRING_LEN];
     if (_service_map.find(svr_name) != _service_map.end()) {
-      RAW_LOG_ERROR("Service[%s][%s] already exist!",
-                    svr_name.c_str(),
-                    typeid(svr).name());
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Service[%s][%s] already exist!",
+               svr_name.c_str(),
+               typeid(svr).name());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }

     std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> ret;
     ret = _service_map.insert(std::make_pair(svr_name, svr));
     if (ret.second == false) {
-      RAW_LOG_ERROR("Service[%s][%s] insert failed!",
-                    svr_name.c_str(),
-                    typeid(svr).name());
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Service[%s][%s] insert failed!",
+               svr_name.c_str(),
+               typeid(svr).name());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }

-    RAW_LOG_INFO("Service[%s] insert successfully!", svr_name.c_str());
+    snprintf(err_str,
+             ERROR_STRING_LEN - 1,
+             "Service[%s] insert successfully!",
+             svr_name.c_str());
+    RAW_LOG(INFO, err_str);
     return 0;
   }

   Service* get_service(const std::string& svr_name) {
+    char err_str[ERROR_STRING_LEN];
     boost::unordered_map<std::string, Service*>::iterator res;
     if ((res = _service_map.find(svr_name)) == _service_map.end()) {
-      RAW_LOG_WARNING("Service[%s] not found in service manager!",
-                      svr_name.c_str());
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Service[%s] not found in service manager!",
+               svr_name.c_str());
+      RAW_LOG(WARNING, err_str);
       return NULL;
     }
     return (*res).second;
......
@@ -202,8 +202,6 @@ int main(int argc, char** argv) {
   }
   VLOG(2) << "Succ call pthread worker start function";

-#ifndef BCLOUD
-
   if (Resource::instance().general_model_initialize(FLAGS_resource_path,
                                                     FLAGS_resource_file) != 0) {
     LOG(ERROR) << "Failed to initialize general model conf: "
@@ -213,6 +211,7 @@ int main(int argc, char** argv) {
   VLOG(2) << "Succ initialize general model";

+#ifndef BCLOUD
   // FATAL messages are output to stderr
   FLAGS_stderrthreshold = 3;
 #endif
......
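The relocated `#ifndef BCLOUD` block sets `FLAGS_stderrthreshold = 3`. glog severities are INFO=0, WARNING=1, ERROR=2, FATAL=3, so a threshold of 3 means only FATAL messages are also copied to stderr, which matches the comment in the hunk above. A small sketch of the effect, assuming glog is installed:

```cpp
#include <glog/logging.h>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  // Only severities >= 3 (FATAL) are mirrored to stderr;
  // INFO/WARNING/ERROR still go to the log files.
  FLAGS_stderrthreshold = 3;
  LOG(ERROR) << "written to the log file only, not stderr";
  return 0;
}
```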
@@ -50,9 +50,9 @@ class WeightedRandomRender : public EndpointRouterBase {
     Factory<WeightedRandomRender, EndpointRouterBase>* factory =
         new (std::nothrow) Factory<WeightedRandomRender, EndpointRouterBase>();
     if (factory == NULL) {
-      RAW_LOG_ERROR(
-          "Failed regist factory: WeightedRandomRender->EndpointRouterBase in "
-          "macro!");
+      RAW_LOG(ERROR,
+              "Failed regist factory: WeightedRandomRender->EndpointRouterBase \
+in macro!");
       return -1;
     }
@@ -62,9 +62,9 @@ class WeightedRandomRender : public EndpointRouterBase {
     // together.
     if (FactoryPool<EndpointRouterBase>::instance().register_factory(
             "WeightedRandomRender", factory) != 0) {
-      RAW_LOG_INFO(
-          "Factory has been registed: "
-          "WeightedRandomRender->EndpointRouterBase.");
+      RAW_LOG(INFO,
+              "Factory has been registed: \
+WeightedRandomRender->EndpointRouterBase.");
     }
     return 0;
......
@@ -18,7 +18,6 @@
 #include <utility>
 #include "core/sdk-cpp/include/common.h"
 #include "core/sdk-cpp/include/stub_impl.h"
-#include "glog/raw_logging.h"

 namespace baidu {
 namespace paddle_serving {
@@ -28,12 +27,20 @@ namespace sdk_cpp {
 namespace brpc = baidu::rpc;
 #endif

+#define ERROR_STRING_LEN 10240
+
 #define INLINE_REGIST_OBJECT(D, B, E) \
   do { \
     Factory<D, B>* factory = new (std::nothrow) Factory<D, B>(); \
     if (factory == NULL || \
         FactoryPool<B>::instance().register_factory(#D, factory) != 0) { \
-      RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s->%s in macro!", \
+               #D, \
+               #B); \
+      RAW_LOG(ERROR, err_str); \
      return E; \
     } \
   } while (0)
@@ -43,7 +50,12 @@ namespace brpc = baidu::rpc;
     Factory<D, B>* factory = new (std::nothrow) Factory<D, B>(); \
     if (factory == NULL || \
         FactoryPool<B>::instance().register_factory(tag, factory) != 0) { \
-      RAW_LOG_ERROR("Failed regist factory: %s in macro!", #D); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s in macro!", \
+               #D); \
+      RAW_LOG(ERROR, err_str); \
       return -1; \
     } \
     return 0; \
@@ -66,7 +78,13 @@ namespace brpc = baidu::rpc;
     if (factory == NULL || \
         ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance() \
             .register_factory(#D, factory) != 0) { \
-      RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s->%s in macro!", \
+               #D, \
+               #B); \
+      RAW_LOG(ERROR, err_str); \
       return; \
     } \
     return; \
@@ -80,8 +98,14 @@ namespace brpc = baidu::rpc;
     if (factory == NULL || \
         ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance() \
             .register_factory(T, factory) != 0) { \
-      RAW_LOG_ERROR( \
-          "Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s->%s, tag %s in macro!", \
+               #D, \
+               #B, \
+               T); \
+      RAW_LOG(ERROR, err_str); \
       return; \
     } \
     return; \
@@ -108,8 +132,13 @@ namespace brpc = baidu::rpc;
         ::baidu::paddle_serving::sdk_cpp::FactoryPool< \
             ::baidu::paddle_serving::sdk_cpp::Stub>::instance() \
                 .register_factory(T, factory) != 0) { \
-      RAW_LOG_ERROR( \
-          "Failed regist factory: %s->Stub, tag: %s in macro!", #D, T); \
+      char err_str[ERROR_STRING_LEN]; \
+      snprintf(err_str, \
+               ERROR_STRING_LEN - 1, \
+               "Failed regist factory: %s->Stub, tag: %s in macro!", \
+               #D, \
+               T); \
+      RAW_LOG(ERROR, err_str); \
       return; \
     } \
     return; \
@@ -146,14 +175,24 @@ class FactoryPool {
     typename std::map<std::string, FactoryBase<B>*>::iterator it =
         _pool.find(tag);
     if (it != _pool.end()) {
-      RAW_LOG_ERROR("Insert duplicate with tag: %s", tag.c_str());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Insert duplicate with tag: %s",
+               tag.c_str());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }

     std::pair<typename std::map<std::string, FactoryBase<B>*>::iterator, bool>
         r = _pool.insert(std::make_pair(tag, factory));
     if (!r.second) {
-      RAW_LOG_ERROR("Failed insert new factory with: %s", tag.c_str());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Failed insert new factory with: %s",
+               tag.c_str());
+      RAW_LOG(ERROR, err_str);
       return -1;
     }
@@ -164,9 +203,13 @@ class FactoryPool {
     typename std::map<std::string, FactoryBase<B>*>::iterator it =
         _pool.find(tag);
     if (it == _pool.end() || it->second == NULL) {
-      RAW_LOG_ERROR("Not found factory pool, tag: %s, pool size: %u",
-                    tag.c_str(),
-                    _pool.size());
+      char err_str[ERROR_STRING_LEN];
+      snprintf(err_str,
+               ERROR_STRING_LEN - 1,
+               "Not found factory pool, tag: %s, pool size: %u",
+               tag.c_str(),
+               _pool.size());
+      RAW_LOG(ERROR, err_str);
       return NULL;
     }
......
@@ -4,26 +4,25 @@

 ## Compilation environment requirements

 | module | version |
 | :--------------------------: | :-------------------------------: |
 | OS | CentOS 7 |
 | gcc | 4.8.5 and later |
 | gcc-c++ | 4.8.5 and later |
 | git | 3.82 and later |
 | cmake | 3.2.0 and later |
 | Python | 2.7.2 and later / 3.6 and later |
 | Go | 1.9.2 and later |
 | git | 2.17.1 and later |
 | glibc-static | 2.17 |
 | openssl-devel | 1.0.2k |
 | bzip2-devel | 1.0.6 and later |
 | python-devel / python3-devel | 2.7.5 and later / 3.6.8 and later |
 | sqlite-devel | 3.7.17 and later |
 | patchelf | 0.9 and later |
 | libXext | 1.3.3 |
 | libSM | 1.2.2 |
 | libXrender | 0.9.10 |
-| python-whl | numpy>=1.12, <=1.16.4<br/>wheel>=0.34.0, <0.35.0<br/>setuptools>=44.1.0<br/>opencv-python==4.2.0.32<br/>google>=2.0.3<br/>protobuf>=3.12.2<br/>grpcio-tools>=1.28.1<br/>grpcio>=1.28.1<br/>func-timeout>=4.3.5<br/>pyyaml>=1.3.0<br/>sentencepiece==0.1.92<br/>flask>=1.1.2<br/>ujson>=2.0.3 |

 It is recommended to use Docker for compilation. We have prepared the Paddle Serving compilation environment for you, see [this document](DOCKER_IMAGES.md).
@@ -114,7 +113,7 @@ make

 ## Install wheel package

-Regardless of the client, server or App part, after compiling, install the whl package under `python/dist/`.
+Regardless of the client, server or App part, after compiling, install the whl package under `python/dist/` in the temporary directory of the compilation process (`server-build-cpu`, `server-build-gpu`, `client-build`, `app-build`).
@@ -124,6 +123,12 @@ When running the python server, it will check the `SERVING_BIN` environment vari

+## Verify
+
+Please use the example under `python/examples` to verify.
+
 ## CMake Option Description

 | Compile Options | Description | Default |
......
@@ -4,26 +4,25 @@

 ## Compilation environment setup

 | component | version requirement |
 | :--------------------------: | :-------------------------------: |
 | OS | CentOS 7 |
 | gcc | 4.8.5 and later |
 | gcc-c++ | 4.8.5 and later |
 | git | 3.82 and later |
 | cmake | 3.2.0 and later |
 | Python | 2.7.2 and later / 3.6 and later |
 | Go | 1.9.2 and later |
 | git | 2.17.1 and later |
 | glibc-static | 2.17 |
 | openssl-devel | 1.0.2k |
 | bzip2-devel | 1.0.6 and later |
 | python-devel / python3-devel | 2.7.5 and later / 3.6.8 and later |
 | sqlite-devel | 3.7.17 and later |
 | patchelf | 0.9 |
 | libXext | 1.3.3 |
 | libSM | 1.2.2 |
 | libXrender | 0.9.10 |
-| python-whl | numpy>=1.12, <=1.16.4<br/>wheel>=0.34.0, <0.35.0<br/>setuptools>=44.1.0<br/>opencv-python==4.2.0.32<br/>google>=2.0.3<br/>protobuf>=3.12.2<br/>grpcio-tools>=1.28.1<br/>grpcio>=1.28.1<br/>func-timeout>=4.3.5<br/>pyyaml>=1.3.0<br/>sentencepiece==0.1.92<br/>flask>=1.1.2<br/>ujson>=2.0.3 |

 Compiling inside Docker is recommended. We have prepared the Paddle Serving compilation environment for you; see [this document](DOCKER_IMAGES_CN.md).
@@ -114,7 +113,7 @@ make

 ## Install the wheel package

-Whether for the Client, Server or App part, after compilation simply install the whl package under `python/dist/`.
+Whether for the Client, Server or App part, after compilation install the whl package under `python/dist/` in the temporary directory of the compilation process (`server-build-cpu`, `server-build-gpu`, `client-build`, `app-build`).
@@ -124,6 +123,12 @@ make

+## How to verify
+
+Please use the examples under `python/examples` to verify.
+
 ## CMake option description

 | Compile option | Description | Default |
......
@@ -14,6 +14,8 @@
 syntax = "proto2";
+package baidu.paddle_serving.multi_lang;
 option java_multiple_files = true;
 option java_package = "io.paddle.serving.grpc";
 option java_outer_classname = "ServingProto";
......
@@ -23,6 +23,7 @@
 #include "core/configure/inferencer_configure.pb.h"
 #include "core/predictor/framework/infer.h"
 #include "paddle_inference_api.h"  // NOLINT
+//#include "predictor/framework/infer.h"

 namespace baidu {
 namespace paddle_serving {
......
@@ -103,8 +103,8 @@ class OpSeqMaker(object):
         elif len(node.dependencies) == 1:
             if node.dependencies[0].name != self.workflow.nodes[-1].name:
                 raise Exception(
-                    'You must add op in order in OpSeqMaker. The previous op is {}, but the current op is followed by {}.'.
-                    format(node.dependencies[0].name, self.workflow.nodes[
+                    'You must add op in order in OpSeqMaker. The previous op is {}, but the current op is followed by {}.'
+                    .format(node.dependencies[0].name, self.workflow.nodes[
                         -1].name))
         self.workflow.nodes.extend([node])
@@ -157,6 +157,8 @@ class Server(object):
         self.cur_path = os.getcwd()
         self.use_local_bin = False
         self.mkl_flag = False
+        self.product_name = None
+        self.container_id = None
         self.model_config_paths = None  # for multi-model in a workflow

     def set_max_concurrency(self, concurrency):
@@ -191,6 +193,16 @@ class Server(object):
     def set_ir_optimize(self, flag=False):
         self.ir_optimization = flag

+    def set_product_name(self, product_name=None):
+        if product_name == None:
+            raise ValueError("product_name can't be None.")
+        self.product_name = product_name
+
+    def set_container_id(self, container_id):
+        if container_id == None:
+            raise ValueError("container_id can't be None.")
+        self.container_id = container_id
+
     def check_local_bin(self):
         if "SERVING_BIN" in os.environ:
             self.use_local_bin = True
@@ -254,6 +266,10 @@ class Server(object):
             self.resource_conf.model_toolkit_file = self.model_toolkit_fn
             self.resource_conf.general_model_path = workdir
             self.resource_conf.general_model_file = self.general_model_config_fn
+            if self.product_name != None:
+                self.resource_conf.auth_product_name = self.product_name
+            if self.container_id != None:
+                self.resource_conf.auth_container_id = self.container_id

     def _write_pb_str(self, filepath, pb_obj):
         with open(filepath, "w") as fout:
@@ -351,8 +367,8 @@ class Server(object):
                 if os.path.exists(tar_name):
                     os.remove(tar_name)
                 raise SystemExit(
-                    'Download failed, please check your network or permission of {}.'.
-                    format(self.module_path))
+                    'Download failed, please check your network or permission of {}.'
+                    .format(self.module_path))
             else:
                 try:
                     print('Decompressing files ..')
@@ -363,8 +379,8 @@ class Server(object):
                     if os.path.exists(exe_path):
                         os.remove(exe_path)
                     raise SystemExit(
-                        'Decompressing failed, please check your permission of {} or disk space left.'.
-                        format(self.module_path))
+                        'Decompressing failed, please check your permission of {} or disk space left.'
+                        .format(self.module_path))
                 finally:
                     os.remove(tar_name)
         #release lock
@@ -541,7 +557,6 @@ class MultiLangServerServiceServicer(multi_lang_general_model_service_pb2_grpc.
         results, tag = ret
         resp.tag = tag
         resp.err_code = 0
-
         if not self.is_multi_model_:
             results = {'general_infer_0': results}
         for model_name, model_result in results.items():
......
@@ -58,6 +58,16 @@ def parse_args():  # pylint: disable=doc-string-missing
         default=False,
         action="store_true",
         help="Use Multi-language-service")
+    parser.add_argument(
+        "--product_name",
+        type=str,
+        default=None,
+        help="product_name for authentication")
+    parser.add_argument(
+        "--container_id",
+        type=str,
+        default=None,
+        help="container_id for authentication")
     return parser.parse_args()
@@ -101,6 +111,10 @@ def start_standard_model():  # pylint: disable=doc-string-missing
     server.use_mkl(use_mkl)
     server.set_max_body_size(max_body_size)
     server.set_port(port)
+    if args.product_name != None:
+        server.set_product_name(args.product_name)
+    if args.container_id != None:
+        server.set_container_id(args.container_id)
     server.load_model_config(model)
     server.prepare_server(workdir=workdir, port=port, device=device)
......
@@ -73,6 +73,16 @@ def serve_args():
         default=False,
         action="store_true",
         help="Use Multi-language-service")
+    parser.add_argument(
+        "--product_name",
+        type=str,
+        default=None,
+        help="product_name for authentication")
+    parser.add_argument(
+        "--container_id",
+        type=str,
+        default=None,
+        help="container_id for authentication")
     return parser.parse_args()
@@ -141,8 +151,8 @@ class OpSeqMaker(object):
         elif len(node.dependencies) == 1:
             if node.dependencies[0].name != self.workflow.nodes[-1].name:
                 raise Exception(
-                    'You must add op in order in OpSeqMaker. The previous op is {}, but the current op is followed by {}.'.
-                    format(node.dependencies[0].name, self.workflow.nodes[
+                    'You must add op in order in OpSeqMaker. The previous op is {}, but the current op is followed by {}.'
+                    .format(node.dependencies[0].name, self.workflow.nodes[
                         -1].name))
         self.workflow.nodes.extend([node])
@@ -196,6 +206,8 @@ class Server(object):
         self.use_local_bin = False
         self.gpuid = 0
         self.model_config_paths = None  # for multi-model in a workflow
+        self.product_name = None
+        self.container_id = None

     def set_max_concurrency(self, concurrency):
         self.max_concurrency = concurrency
@@ -229,6 +241,16 @@ class Server(object):
     def set_ir_optimize(self, flag=False):
         self.ir_optimization = flag

+    def set_product_name(self, product_name=None):
+        if product_name == None:
+            raise ValueError("product_name can't be None.")
+        self.product_name = product_name
+
+    def set_container_id(self, container_id):
+        if container_id == None:
+            raise ValueError("container_id can't be None.")
+        self.container_id = container_id
+
     def check_local_bin(self):
         if "SERVING_BIN" in os.environ:
             self.use_local_bin = True
@@ -302,6 +324,10 @@ class Server(object):
             self.resource_conf.model_toolkit_file = self.model_toolkit_fn
             self.resource_conf.general_model_path = workdir
             self.resource_conf.general_model_file = self.general_model_config_fn
+            if self.product_name != None:
+                self.resource_conf.auth_product_name = self.product_name
+            if self.container_id != None:
+                self.resource_conf.auth_container_id = self.container_id

     def _write_pb_str(self, filepath, pb_obj):
         with open(filepath, "w") as fout:
@@ -393,8 +419,8 @@ class Server(object):
                 if os.path.exists(tar_name):
                     os.remove(tar_name)
                 raise SystemExit(
-                    'Download failed, please check your network or permission of {}.'.
-                    format(self.module_path))
+                    'Download failed, please check your network or permission of {}.'
+                    .format(self.module_path))
             else:
                 try:
                     print('Decompressing files ..')
@@ -405,8 +431,8 @@ class Server(object):
                     if os.path.exists(exe_path):
                         os.remove(exe_path)
                     raise SystemExit(
-                        'Decompressing failed, please check your permission of {} or disk space left.'.
-                        format(self.module_path))
+                        'Decompressing failed, please check your permission of {} or disk space left.'
+                        .format(self.module_path))
                 finally:
                     os.remove(tar_name)
         #release lock
......
@@ -65,6 +65,11 @@ def start_gpu_card_model(index, gpuid, args):  # pylint: disable=doc-string-miss
     server.set_ir_optimize(ir_optim)
     server.set_max_body_size(max_body_size)

+    if args.product_name != None:
+        server.set_product_name(args.product_name)
+    if args.container_id != None:
+        server.set_container_id(args.container_id)
+
     server.load_model_config(model)
     server.prepare_server(workdir=workdir, port=port, device=device)
     if gpuid >= 0:
@@ -83,8 +88,8 @@ def start_multi_card(args):  # pylint: disable=doc-string-missing
         for ids in gpus:
             if int(ids) >= len(env_gpus):
                 print(
-                    " Max index of gpu_ids out of range, the number of CUDA_VISIBLE_DEVICES is {}.".
-                    format(len(env_gpus)))
+                    " Max index of gpu_ids out of range, the number of CUDA_VISIBLE_DEVICES is {}."
+                    .format(len(env_gpus)))
                 exit(-1)
     else:
         env_gpus = []
......
 numpy>=1.12, <=1.16.4 ; python_version<"3.5"
+shapely==1.7.0
 wheel>=0.34.0, <0.35.0
 setuptools>=44.1.0
 opencv-python==4.2.0.32
......
@@ -43,7 +43,7 @@ if '${PACK}' == 'ON':

 REQUIRED_PACKAGES = [
     'six >= 1.10.0', 'sentencepiece', 'opencv-python<=4.2.0.32', 'pillow',
-    'shapely', 'pyclipper'
+    'shapely<=1.6.1', 'pyclipper'
 ]
 packages=['paddle_serving_app',
......