提交 1bb982eb 编写于 作者: W wangguibao

Codestyle check

Change-Id: Iff5fdd63bb96832212d31bfa114f663f1190c678
上级 3f2c56e7
......@@ -70,7 +70,8 @@ include(generic)
include(paddlepaddle)
include(external/opencv)
include_directories("${PADDLE_SERVING_SOURCE_DIR}")
include_directories(${PADDLE_SERVING_SOURCE_DIR})
include_directories(${PADDLE_SERVING_BINARY_DIR})
set(EXTERNAL_LIBS
gflags
......@@ -94,6 +95,3 @@ add_subdirectory(predictor)
add_subdirectory(inferencer-fluid-cpu)
add_subdirectory(serving)
add_subdirectory(sdk-cpp)
......@@ -336,4 +336,3 @@ Paddle serving框架为策略工程师提供以下三层面的功能性扩展:
{:name => 'main', :conf => 'predictor_valid.conf', :target => 'port'}, // valid工具向这个端口发送测试请求,确保服务已正常启动
]
```
......@@ -11,6 +11,9 @@ list(APPEND configure_srcs ${CMAKE_CURRENT_LIST_DIR}/src/configure_parser.cpp)
add_library(configure ${configure_srcs})
add_dependencies(configure brpc)
target_include_directories(configure PUBLIC
${CMAKE_CURRENT_LIST_DIR}/
)
add_executable(test_configure
${CMAKE_CURRENT_LIST_DIR}/tests/test_configure.cpp)
......@@ -23,4 +26,3 @@ target_link_libraries(test_configure configure protobuf)
install(TARGETS configure
ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <google/protobuf/message.h>
#include <string>
namespace baidu {
namespace paddle_serving {
......@@ -12,6 +27,6 @@ int write_proto_conf(google::protobuf::Message *message,
const std::string &output_path,
const std::string &output_file);
} // configure
} // paddle_serving
} // baidu
} // namespace configure
} // namespace paddle_serving
} // namespace baidu
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package baidu.paddle_serving.configure;
message SigmoidConf {
......@@ -8,4 +22,3 @@ message SigmoidConf {
required float exp_max_input = 4;
required float exp_min_input = 5;
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package baidu.paddle_serving.configure;
message ConnectionConf {
......@@ -25,7 +39,7 @@ message RpcParameter {
required uint32 max_channel_per_request = 4;
};
message SplitConf{
message SplitConf {
optional string split_tag_name = 1;
optional string tag_candidates = 2;
};
......@@ -39,9 +53,7 @@ message VariantConf {
optional string variant_router = 6;
};
message WeightedRandomRenderConf {
required string variant_weight_list = 1;
};
message WeightedRandomRenderConf { required string variant_weight_list = 1; };
message Predictor {
required string name = 1;
......@@ -56,4 +68,3 @@ message SDKConf {
required VariantConf default_variant_conf = 1;
repeated Predictor predictors = 2;
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package baidu.paddle_serving.configure;
message EngineDesc {
......@@ -15,9 +29,7 @@ message EngineDesc {
};
// model_toolkit conf
message ModelToolkitConf {
repeated EngineDesc engines = 1;
};
message ModelToolkitConf { repeated EngineDesc engines = 1; };
// reource conf
message ResourceConf {
......@@ -46,10 +58,7 @@ message Workflow {
};
// Workflow conf
message WorkflowConf {
repeated Workflow workflows = 1;
}
message WorkflowConf { repeated Workflow workflows = 1; }
// request_field_key: specifies use which request field as mapping key (see
// request_field_key in InferService below)
......@@ -68,7 +77,7 @@ message InferService {
required string name = 1;
optional string merger = 2;
optional bool enable_map_request_to_workflow = 3 [default = false];
optional bool enable_map_request_to_workflow = 3 [ default = false ];
// If enable_map_request_to_workfow = true
//
......
#include <sys/types.h>
#include <sys/stat.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/configure_parser.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fstream>
#include "butil/logging.h"
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/text_format.h"
namespace baidu {
namespace paddle_serving {
......@@ -13,8 +28,7 @@ namespace configure {
int read_proto_conf(const std::string &conf_path,
const std::string &conf_file,
google::protobuf::Message *conf)
{
google::protobuf::Message *conf) {
std::string file_str = conf_path + "/" + conf_file;
int fd = open(file_str.c_str(), O_RDONLY);
if (fd == -1) {
......@@ -34,8 +48,7 @@ int read_proto_conf(const std::string &conf_path,
int write_proto_conf(google::protobuf::Message *message,
const std::string &output_path,
const std::string &output_file)
{
const std::string &output_file) {
std::string binary_str;
google::protobuf::TextFormat::PrintToString(*message, &binary_str);
......@@ -46,13 +59,13 @@ int write_proto_conf(google::protobuf::Message *message,
return -1;
}
fout_bin.write((char *)binary_str.c_str(), binary_str.size());
fout_bin.write(binary_str.c_str(), binary_str.size());
fout_bin.close();
return 0;
}
} // configure
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace configure
} // namespace paddle_serving
} // namespace baidu
/* vim: set expandtab ts=2 sw=2 sts=2 tw=100: */
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include "server_configure.pb.h"
#include "sdk_configure.pb.h"
#include "inferencer_configure.pb.h"
#include "configure_parser.h"
#include "configure/inferencer_configure.pb.h"
#include "configure/sdk_configure.pb.h"
#include "configure/server_configure.pb.h"
#include "include/configure_parser.h"
using baidu::paddle_serving::configure::EngineDesc;
using baidu::paddle_serving::configure::ModelToolkitConf;
......@@ -30,16 +44,15 @@ using baidu::paddle_serving::configure::SDKConf;
using baidu::paddle_serving::configure::SigmoidConf;
const std::string output_dir = "./conf/";
const std::string model_toolkit_conf_file = "model_toolkit.prototxt";
const std::string resource_conf_file = "resource.prototxt";
const std::string workflow_conf_file = "workflow.prototxt";
const std::string service_conf_file = "service.prototxt";
const std::string sdk_conf_file = "predictors.prototxt";
const std::string sigmoid_conf_file = "inferencer.prototxt";
const char *output_dir = "./conf/";
const char *model_toolkit_conf_file = "model_toolkit.prototxt";
const char *resource_conf_file = "resource.prototxt";
const char *workflow_conf_file = "workflow.prototxt";
const char *service_conf_file = "service.prototxt";
const char *sdk_conf_file = "predictors.prototxt";
const char *sigmoid_conf_file = "inferencer.prototxt";
int test_write_conf()
{
int test_write_conf() {
// model_toolkit conf
ModelToolkitConf model_toolkit_conf;
......@@ -54,7 +67,8 @@ int test_write_conf()
engine->set_batch_infer_size(0);
engine->set_enable_batch_align(0);
int ret = baidu::paddle_serving::configure::write_proto_conf(&model_toolkit_conf, output_dir, model_toolkit_conf_file);
int ret = baidu::paddle_serving::configure::write_proto_conf(
&model_toolkit_conf, output_dir, model_toolkit_conf_file);
if (ret != 0) {
return ret;
}
......@@ -63,7 +77,8 @@ int test_write_conf()
ResourceConf resource_conf;
resource_conf.set_model_toolkit_path(output_dir);
resource_conf.set_model_toolkit_file("model_toolkit.prototxt");
ret = baidu::paddle_serving::configure::write_proto_conf(&resource_conf, output_dir, resource_conf_file);
ret = baidu::paddle_serving::configure::write_proto_conf(
&resource_conf, output_dir, resource_conf_file);
if (ret != 0) {
return ret;
}
......@@ -100,7 +115,8 @@ int test_write_conf()
dag_node->set_name("dense_op");
dag_node->set_type("DenseOp");
ret = baidu::paddle_serving::configure::write_proto_conf(&workflow_conf, output_dir, workflow_conf_file);
ret = baidu::paddle_serving::configure::write_proto_conf(
&workflow_conf, output_dir, workflow_conf_file);
if (ret != 0) {
return ret;
}
......@@ -116,7 +132,8 @@ int test_write_conf()
infer_service->set_name("BuiltinDenseFormatService");
infer_service->add_workflows("workflow2");
ret = baidu::paddle_serving::configure::write_proto_conf(&infer_service_conf, output_dir, service_conf_file);
ret = baidu::paddle_serving::configure::write_proto_conf(
&infer_service_conf, output_dir, service_conf_file);
if (ret != 0) {
return ret;
}
......@@ -125,7 +142,8 @@ int test_write_conf()
VariantConf *default_variant_conf = sdk_conf.mutable_default_variant_conf();
default_variant_conf->set_tag("default");
ConnectionConf *connection_conf = default_variant_conf->mutable_connection_conf();
ConnectionConf *connection_conf =
default_variant_conf->mutable_connection_conf();
connection_conf->set_connect_timeout_ms(2000);
connection_conf->set_rpc_timeout_ms(20000);
connection_conf->set_connect_retry_count(2);
......@@ -146,10 +164,13 @@ int test_write_conf()
Predictor *predictor = sdk_conf.add_predictors();
predictor->set_name("ximage");
predictor->set_service_name("baidu.paddle_serving.predictor.image_classification.ImageClassifyService");
predictor->set_service_name(
"baidu.paddle_serving.predictor.image_classification."
"ImageClassifyService");
predictor->set_endpoint_router("WeightedRandomRender");
WeightedRandomRenderConf *weighted_random_render_conf = predictor->mutable_weighted_random_render_conf();
WeightedRandomRenderConf *weighted_random_render_conf =
predictor->mutable_weighted_random_render_conf();
weighted_random_render_conf->set_variant_weight_list("50");
VariantConf *variant_conf = predictor->add_variants();
......@@ -157,7 +178,8 @@ int test_write_conf()
naming_conf = variant_conf->mutable_naming_conf();
naming_conf->set_cluster("list://127.0.0.1:8010");
ret = baidu::paddle_serving::configure::write_proto_conf(&sdk_conf, output_dir, sdk_conf_file);
ret = baidu::paddle_serving::configure::write_proto_conf(
&sdk_conf, output_dir, sdk_conf_file);
if (ret != 0) {
return ret;
}
......@@ -169,7 +191,8 @@ int test_write_conf()
sigmoid_conf.set_exp_max_input(0.75);
sigmoid_conf.set_exp_min_input(0.25);
ret = baidu::paddle_serving::configure::write_proto_conf(&sigmoid_conf, output_dir, sigmoid_conf_file);
ret = baidu::paddle_serving::configure::write_proto_conf(
&sigmoid_conf, output_dir, sigmoid_conf_file);
if (ret != 0) {
return ret;
}
......@@ -177,47 +200,52 @@ int test_write_conf()
return 0;
}
int test_read_conf()
{
int test_read_conf() {
int ret = 0;
ModelToolkitConf model_toolkit_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, model_toolkit_conf_file, &model_toolkit_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, model_toolkit_conf_file, &model_toolkit_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << model_toolkit_conf_file << std::endl;
return -1;
}
ResourceConf resource_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, resource_conf_file, &resource_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, resource_conf_file, &resource_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << resource_conf_file << std::endl;
return -1;
}
WorkflowConf workflow_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, workflow_conf_file, &workflow_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, workflow_conf_file, &workflow_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << workflow_conf_file << std::endl;
return -1;
}
InferServiceConf service_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, service_conf_file, &service_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, service_conf_file, &service_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << service_conf_file << std::endl;
return -1;
}
SDKConf sdk_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, sdk_conf_file, &sdk_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, sdk_conf_file, &sdk_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << sdk_conf_file << std::endl;
return -1;
}
SigmoidConf sigmoid_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, sigmoid_conf_file, &sigmoid_conf);
ret = baidu::paddle_serving::configure::read_proto_conf(
output_dir, sigmoid_conf_file, &sigmoid_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << sdk_conf_file << std::endl;
return -1;
......@@ -226,11 +254,10 @@ int test_read_conf()
return 0;
}
int main()
{
int main() {
int ret = 0;
struct stat stat_buf;
if (stat(output_dir.c_str(), &stat_buf) != 0) {
if (stat(output_dir, &stat_buf) != 0) {
int ret = mkdir("./conf", 0777);
if (ret != 0) {
std::cout << "mkdir ./conf fail" << std::endl;
......
......@@ -10,4 +10,3 @@ target_link_libraries(fluid_cpu_engine pdserving paddle_fluid -liomp5 -lmklml_in
install(TARGETS fluid_cpu_engine
ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <pthread.h>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <map>
#include <fstream>
#include "configure/include/configure_parser.h"
#include "configure/inferencer_configure.pb.h"
#include "framework/infer.h"
#include "paddle/fluid/inference/paddle_inference_api.h"
#include "inferencer_configure.pb.h"
#include "configure_parser.h"
namespace baidu {
namespace paddle_serving {
......@@ -17,59 +31,49 @@ namespace fluid_cpu {
using configure::SigmoidConf;
class AutoLock {
public:
AutoLock(pthread_mutex_t& mutex) : _mut(mutex){
public:
explicit AutoLock(pthread_mutex_t& mutex) : _mut(mutex) {
pthread_mutex_lock(&mutex);
}
~AutoLock() {
pthread_mutex_unlock(&_mut);
}
~AutoLock() { pthread_mutex_unlock(&_mut); }
private:
private:
pthread_mutex_t& _mut;
};
class GlobalPaddleCreateMutex {
public:
pthread_mutex_t& mutex() {
return _mut;
}
public:
pthread_mutex_t& mutex() { return _mut; }
static pthread_mutex_t& instance() {
static GlobalPaddleCreateMutex gmutex;
return gmutex.mutex();
}
private:
GlobalPaddleCreateMutex() {
pthread_mutex_init(&_mut, NULL);
}
private:
GlobalPaddleCreateMutex() { pthread_mutex_init(&_mut, NULL); }
pthread_mutex_t _mut;
};
class GlobalSigmoidCreateMutex {
public:
pthread_mutex_t& mutex() {
return _mut;
}
public:
pthread_mutex_t& mutex() { return _mut; }
static pthread_mutex_t& instance() {
static GlobalSigmoidCreateMutex gmutex;
return gmutex.mutex();
}
private:
GlobalSigmoidCreateMutex() {
pthread_mutex_init(&_mut, NULL);
}
private:
GlobalSigmoidCreateMutex() { pthread_mutex_init(&_mut, NULL); }
pthread_mutex_t _mut;
};
// data interface
class FluidFamilyCore {
public:
public:
virtual ~FluidFamilyCore() {}
virtual bool Run(const void* in_data, void* out_data) {
if (!_core->Run(*(std::vector<paddle::PaddleTensor>*)in_data,
......@@ -88,7 +92,8 @@ public:
LOG(ERROR) << "origin paddle Predictor is null.";
return -1;
}
paddle::PaddlePredictor* p_predictor = (paddle::PaddlePredictor*)origin_core;
paddle::PaddlePredictor* p_predictor =
(paddle::PaddlePredictor*)origin_core;
_core = p_predictor->Clone();
if (_core.get() == NULL) {
LOG(ERROR) << "fail to clone paddle predictor: " << origin_core;
......@@ -97,17 +102,15 @@ public:
return 0;
}
virtual void* get() {
return _core.get();
}
virtual void* get() { return _core.get(); }
protected:
protected:
std::unique_ptr<paddle::PaddlePredictor> _core;
};
// infer interface
class FluidCpuAnalysisCore : public FluidFamilyCore {
public:
public:
int create(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -122,21 +125,20 @@ public:
analysis_config.device = 0;
analysis_config.specify_input_name = true;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core = paddle::CreatePaddlePredictor<
paddle::contrib::AnalysisConfig>(analysis_config);
_core = paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(
analysis_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
class FluidCpuNativeCore : public FluidFamilyCore {
public:
public:
int create(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -150,21 +152,21 @@ public:
native_config.use_gpu = false;
native_config.device = 0;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core = paddle::CreatePaddlePredictor<
paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(native_config);
_core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
paddle::PaddleEngineKind::kNative>(
native_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
class FluidCpuAnalysisDirCore : public FluidFamilyCore {
public:
public:
int create(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -178,22 +180,20 @@ public:
analysis_config.device = 0;
analysis_config.specify_input_name = true;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core = paddle::CreatePaddlePredictor<
paddle::contrib::AnalysisConfig>(analysis_config);
_core = paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(
analysis_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
class FluidCpuNativeDirCore : public FluidFamilyCore {
public:
public:
int create(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -206,22 +206,21 @@ public:
native_config.use_gpu = false;
native_config.device = 0;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core = paddle::CreatePaddlePredictor<
paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(native_config);
_core = paddle::CreatePaddlePredictor<paddle::NativeConfig,
paddle::PaddleEngineKind::kNative>(
native_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
class Parameter {
public:
public:
Parameter() : _row(0), _col(0), _params(NULL) {}
~Parameter() {
LOG(INFO) << "before destroy Parameter, file_name[" << _file_name << "]";
......@@ -233,7 +232,7 @@ public:
_file_name = file_name;
_row = row;
_col = col;
_params = (float*)malloc(_row * _col * sizeof(float));
_params = reinterpret_cast<float*>(malloc(_row * _col * sizeof(float)));
if (_params == NULL) {
LOG(ERROR) << "Load " << _file_name << " malloc error.";
return -1;
......@@ -294,7 +293,7 @@ public:
return 0;
}
public:
public:
std::string _file_name;
int _row;
int _col;
......@@ -302,40 +301,44 @@ public:
};
class SigmoidModel {
public:
~SigmoidModel() {
}
int load(const char* sigmoid_w_file, const char* sigmoid_b_file,
float exp_max, float exp_min) {
public:
~SigmoidModel() {}
int load(const char* sigmoid_w_file,
const char* sigmoid_b_file,
float exp_max,
float exp_min) {
AutoLock lock(GlobalSigmoidCreateMutex::instance());
if (0 != _sigmoid_w.init(2, 1, sigmoid_w_file) || 0 != _sigmoid_w.load()) {
LOG(ERROR) << "load params sigmoid_w failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_w [" << _sigmoid_w._params[0]
<< "] [" << _sigmoid_w._params[1] << "].";
LOG(WARNING) << "load sigmoid_w [" << _sigmoid_w._params[0] << "] ["
<< _sigmoid_w._params[1] << "].";
if (0 != _sigmoid_b.init(2, 1, sigmoid_b_file) || 0 != _sigmoid_b.load()) {
LOG(ERROR) << "load params sigmoid_b failed.";
return -1;
}
LOG(WARNING) << "load sigmoid_b [" << _sigmoid_b._params[0]
<< "] [" << _sigmoid_b._params[1] << "].";
LOG(WARNING) << "load sigmoid_b [" << _sigmoid_b._params[0] << "] ["
<< _sigmoid_b._params[1] << "].";
_exp_max_input = exp_max;
_exp_min_input = exp_min;
return 0;
}
int softmax(float x, double& o) {
int softmax(float x, double& o) { // NOLINT
float _y0 = x * _sigmoid_w._params[0] + _sigmoid_b._params[0];
float _y1 = x * _sigmoid_w._params[1] + _sigmoid_b._params[1];
_y0 = (_y0 > _exp_max_input) ? _exp_max_input
_y0 = (_y0 > _exp_max_input)
? _exp_max_input
: ((_y0 < _exp_min_input) ? _exp_min_input : _y0);
_y1 = (_y1 > _exp_max_input) ? _exp_max_input
_y1 = (_y1 > _exp_max_input)
? _exp_max_input
: ((_y1 < _exp_min_input) ? _exp_min_input : _y1);
o = 1.0f / (1.0f + exp(_y0 - _y1));
return 0;
}
public:
public:
Parameter _sigmoid_w;
Parameter _sigmoid_b;
float _exp_max_input;
......@@ -343,10 +346,10 @@ public:
};
class SigmoidFluidModel {
public:
int softmax(float x, double& o) {
public:
int softmax(float x, double& o) { // NOLINT
return _sigmoid_core->softmax(x, o);
}
} // NOLINT
std::unique_ptr<SigmoidFluidModel> Clone() {
std::unique_ptr<SigmoidFluidModel> clone_model;
......@@ -356,16 +359,16 @@ public:
return std::move(clone_model);
}
public:
public:
std::unique_ptr<paddle::PaddlePredictor> _fluid_core;
std::shared_ptr<SigmoidModel> _sigmoid_core;
};
class FluidCpuWithSigmoidCore : public FluidFamilyCore {
public:
virtual ~FluidCpuWithSigmoidCore() {
}
public:
public:
virtual ~FluidCpuWithSigmoidCore() {}
public:
int create(const std::string& model_path) {
size_t pos = model_path.find_last_of("/\\");
std::string conf_path = model_path.substr(0, pos);
......@@ -391,7 +394,8 @@ public:
_core->_sigmoid_core.reset(new SigmoidModel);
LOG(INFO) << "create sigmoid core[" << _core->_sigmoid_core.get()
<< "], use count[" << _core->_sigmoid_core.use_count() << "].";
ret = _core->_sigmoid_core->load(sigmoid_w_file, sigmoid_b_file, exp_max, exp_min);
ret = _core->_sigmoid_core->load(
sigmoid_w_file, sigmoid_b_file, exp_max, exp_min);
if (ret < 0) {
LOG(ERROR) << "fail to load sigmoid model.";
return -1;
......@@ -400,7 +404,8 @@ public:
}
virtual bool Run(const void* in_data, void* out_data) {
if (!_core->_fluid_core->Run(*(std::vector<paddle::PaddleTensor>*)in_data,
if (!_core->_fluid_core->Run(
*(std::vector<paddle::PaddleTensor>*)in_data,
(std::vector<paddle::PaddleTensor>*)out_data)) {
LOG(ERROR) << "Failed call Run with paddle predictor";
return false;
......@@ -424,22 +429,20 @@ public:
return 0;
}
virtual SigmoidFluidModel* get() {
return _core.get();
}
virtual SigmoidFluidModel* get() { return _core.get(); }
virtual int load_fluid_model(const std::string& data_path) = 0;
int softmax(float x, double& o) {
int softmax(float x, double& o) { // NOLINT
return _core->_sigmoid_core->softmax(x, o);
}
protected:
protected:
std::unique_ptr<SigmoidFluidModel> _core;
};
class FluidCpuNativeDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
public:
public:
int load_fluid_model(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -452,22 +455,22 @@ public:
native_config.use_gpu = false;
native_config.device = 0;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core->_fluid_core = paddle::CreatePaddlePredictor<
paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(native_config);
_core->_fluid_core =
paddle::CreatePaddlePredictor<paddle::NativeConfig,
paddle::PaddleEngineKind::kNative>(
native_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
class FluidCpuAnalysisDirWithSigmoidCore : public FluidCpuWithSigmoidCore {
public:
public:
int load_fluid_model(const std::string& data_path) {
if (access(data_path.c_str(), F_OK) == -1) {
LOG(ERROR) << "create paddle predictor failed, path not exits: "
......@@ -481,15 +484,15 @@ public:
analysis_config.device = 0;
analysis_config.specify_input_name = true;
AutoLock lock(GlobalPaddleCreateMutex::instance());
_core->_fluid_core = paddle::CreatePaddlePredictor<
paddle::contrib::AnalysisConfig>(analysis_config);
_core->_fluid_core =
paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(
analysis_config);
if (NULL == _core.get()) {
LOG(ERROR) << "create paddle predictor failed, path: "
<< data_path;
LOG(ERROR) << "create paddle predictor failed, path: " << data_path;
return -1;
}
LOG(WARNING) << "create paddle predictor sucess, path: "<< data_path;
LOG(WARNING) << "create paddle predictor sucess, path: " << data_path;
return 0;
}
};
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "inferencer-fluid-cpu/include/fluid_cpu_engine.h"
#include "framework/factory.h"
#include "fluid_cpu_engine.h"
namespace baidu {
namespace paddle_serving {
......@@ -7,27 +21,36 @@ namespace fluid_cpu {
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuAnalysisCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_ANALYSIS");
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_ANALYSIS");
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuAnalysisDirCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_ANALYSIS_DIR");
::baidu::paddle_serving::predictor::FluidInferEngine<
FluidCpuAnalysisDirCore>,
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_ANALYSIS_DIR");
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuAnalysisDirWithSigmoidCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_ANALYSIS_DIR_SIGMOID");
::baidu::paddle_serving::predictor::FluidInferEngine<
FluidCpuAnalysisDirWithSigmoidCore>,
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_ANALYSIS_DIR_SIGMOID");
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuNativeCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_NATIVE");
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_NATIVE");
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuNativeDirCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_NATIVE_DIR");
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_NATIVE_DIR");
REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(
::baidu::paddle_serving::predictor::FluidInferEngine<FluidCpuNativeDirWithSigmoidCore>,
::baidu::paddle_serving::predictor::InferEngine, "FLUID_CPU_NATIVE_DIR_SIGMOID");
::baidu::paddle_serving::predictor::FluidInferEngine<
FluidCpuNativeDirWithSigmoidCore>,
::baidu::paddle_serving::predictor::InferEngine,
"FLUID_CPU_NATIVE_DIR_SIGMOID");
} // namespace fluid_cpu
} // namespace paddle_serving
......
......@@ -34,5 +34,3 @@ install(TARGETS pdserving pdcodegen
ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib
LIBRARY DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/so
)
[TOC]
# 概述
PaddlePaddle是公司开源的机器学习框架,广泛支持各种深度学习模型的定制化开发;
Paddle cloud是基于PaddlePaddle框架实现的一整套云平台,对外提供全流程的AI开发平台,对内托管集团内各产品线的机器学习云服务。
Paddle serving是Paddle cloud的在线预测部分,与Paddle cloud模型训练环节无缝衔接,对外提供机器学习预测共有云服务,对内为公司各业务线提供统一的模型预测开发框架和云服务。
# Getting Started
## 运行示例
说明:Imagenet图像分类模型,默认采用CPU模式(GPU模式请修改BCLOUD配置项,并用Dockerfile构建运行环境,[Docker部署请参考Wiki](http://agroup.baidu.com/share/md/044f552e866f4078900be503784e2468))。
Step1:启动Server端:
```shell
git clone ssh://icode.baidu.com:8235/baidu/paddle-serving/serving ~/my_paddle_serving/baidu/paddle-serving/serving && cd ~/my_paddle_serving/baidu/paddle-serving/serving && bcloud build && ./output/bin/image_class &
```
Step2:启动Client端:
```shell
git clone ssh://icode.baidu.com:8235/baidu/paddle-serving/sdk-cpp ~/my_paddle_serving/baidu/paddle-serving/sdk-cpp && cd ~/my_paddle_serving/baidu/paddle-serving/sdk-cpp && bcloud build && ./output/bin/ximage && pkill image_class
```
## 示例说明
### 预测接口定义
```c++
syntax="proto2";
package baidu.paddle_serving.predictor.image_class;
option cc_generic_services = true;
// x-image request相关(批量接口)
message XImageReqInstance {
required bytes image_binary = 1;
required uint32 image_length = 2;
};
message Request {
repeated XImageReqInstance instances = 1;
};
// x-image response相关(批量接口)
message DensePrediction {
repeated float categories = 1;
};
message ClassResponse {
repeated DensePrediction predictions = 1;
};
message XImageResInstance {
required string response_json = 1;
};
message Response {
// Each json string is serialized from ClassResponse
repeated XImageResInstance predictions = 1;
};
// Service/method相关
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
};
```
### Server端实现
用户只需定制或配置以下三类信息的实现,即可快速搭建完整的Paddle-Serving预测模块。
#### 接口改造([proto目录](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:proto/))
Server端需对预测接口作如下修改即可:
```c++
// 改动1:依赖paddle-serving option接口文件
import "pds_option.proto";
...
service ClassService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
// 改动2:打开generate_impl开关(以支持配置驱动)
option (pds.options).generate_impl = true;
};
```
#### 示例配置([conf目录](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:conf/))
- gflags配置项
| name | 默认值 | 含义 |
|------|--------|------|
| workflow_path | ./conf | workflow配置目录名 |
|workflow_file|workflow.conf|workflow配置文件名|
|inferservice_path|./conf|service配置目录名|
|inferservice_file|service.conf|service配置文件名|
|logger_path|./conf|日志配置目录名|
|logger_file|log.conf|日志配置文件名|
|resource_path|./conf|资源管理器目录名|
|resource_file|resource.conf|资源管理器文件名|
|reload_interval_s|10|重载线程间隔时间(s)|
- 配置文件实例(Image图像分类demo)
```shell
# >>> service.conf
[@Service]
name: ImageClassifyService
@workflow: workflow_image_classification
# >>> workflow.conf
[@Workflow]
name: workflow_image_classification
path: ./conf
file: imagec_dag.conf
# >>> imagec_dag.conf
workflow_type: Sequence
[@Node]
name: image_reader_op
type: ImageReaderOp
[@Node]
name: image_classify_op
type: ImageClassifyOp
[.@Depend]
name: image_reader_op
mode: RO
[@Node]
name: write_json_op
type: WriteJsonOp
[.@Depend]
name: image_classify_op
mode: RO
# >>> resource.conf
model_manager_path: ./conf
model_manager_file: model_toolkit.conf
```
#### 定制Op算子([op目录](http://icode.baidu.com/repos/baidu/paddle-serving/serving/tree/master:op/))
- 预处理算子(ImageReaderOp):从Request中读取图像字节流,通过opencv解码,填充tensor对象并输出到channel;
- 预测调用算子(ImageClassifyOp):从ImageReaderOp的channel获得输入tensor,临时申请输出tensor,调用ModelToolkit进行预测,并将输出tensor写入channel
- 后处理算子(WriteJsonOp):从ImageClassifyOp的channel获得输出tensor,将其序列化为json字符串,写入作为rpc的output;
### Client端实现
用户只需定制或配置以下三类信息,即可方便的接入预估请求,并在本地配置多套服务连接:
#### 接口改造([proto目录](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/tree/master:proto))
Client端接口只需对预测接口作如下修改即可:
```c++
// 改动1:依赖paddle-serving option接口文件
import "pds_option.proto";
...
service ImageClassifyService {
rpc inference(Request) returns (Response);
rpc debug(Request) returns (Response);
// 改动2:打开generate_stub开关(以支持配置驱动)
option (pds.options).generate_stub = true;
};
```
#### 连接配置([conf目录](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/tree/master:conf))
```shell
# predictions.conf
## 默认配置共享
[DefaultVariantInfo]
Tag : default
[.Connection]
ConnectTimeoutMicroSec : 200
ReadTimeoutMicroSec : 2000
WriteTimeoutMicroSec : 500
ConnectRetryCount : 2
MaxConnectionPerHost : 100
HedgeRequestTimeoutMicroSec : -1
HedgeFetchRetryCount : 2
BnsReloadIntervalSeconds : 10
ConnectionType : pooled
[.NamingInfo]
ClusterFilterStrategy : Default
LoadBalanceStrategy : la
[.RpcParameter]
# 0-NONE, 1-SNAPPY, 2-GZIP, 3-ZLIB, 4-LZ4
CompressType : 0
Protocol : baidu_std
MaxChannelPerRequest : 3
[@Predictor]
name : ximage
service_name : baidu.paddle_serving.predictor.image_class.ImageClassifyService
endpoint_router : WeightedRandomRender
[.WeightedRandomRender]
VariantWeightList : 30|70 # 30% vs 70% pvs
[.@VariantInfo]
Tag : var1 # 变体版本标识,提供上游辨识
[..NamingInfo]
Cluster : list://127.0.0.1:8010
[.@VariantInfo]
Tag : var2
[..NamingInfo]
Cluster : list://127.0.0.1:8011
```
#### 请求逻辑([demo/ximage.cpp](http://icode.baidu.com/repos/baidu/paddle-serving/sdk-cpp/blob/master:demo/ximage.cpp))
```c++
// 进程级初始化
assert(PredictorAPI::instance().create("./conf/predictions.conf") == 0);
// 线程级预测调用:
Request req;
// fill request
// ...
Response res;
Predictor* ximage = PredictorAPI::instance().fetch_predictor("ximage");
assert(ximage != NULL);
ximage->inference(req, res);
// parse response
// ...
assert(PredictorAPI::instance().free_predictor(ximage) == 0);
// 进程级销毁
assert(PredictorAPI::instance().destroy() == 0);
```
## 凤巢协议兼容
Paddle Serving由凤巢观星框架发展而来,而之前框架的通信协议是nshead+compack+idl,为方便新老接口的兼容,Paddle Serving的server和client均支持向后兼容:
- 老API访问新Server,为适配老观星客户端数据包格式,新Server需通过mcpack2pb生成能解析idl格式的pb对象,详见:[wtitleq server实现](http://icode.baidu.com/repos/baidu/paddle-serving/lr-model/tree/master)
- 新SDK访问老Server,为能够访问老观星server服务,SDK需通过mcpack2pb插件生成基于idl格式的序列化逻辑;详见:[wtitleq api实现](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/tree/master)
凤巢广告拆包支持:Paddle Serving的SDK-Cpp为用户提供了简单易用的拆包功能,通过修改proto/conf文件开启:
```c++
// interface.proto文件
message PredictorRequest {
message AdvRequest {
// 广告级别字段
repeated uint32 ideaid = 1;
repeated string title = 2;
}
// query级别字段
required uint64 sid = 1;
required string query = 2;
// ...
// 广告级别字段
repeated AdvRequest advs = 3 [(pds.pack_on)=true]; // 改动1:对advs字段进行拆包
}
// ...
service WtitleqService {
rpc ...
rpc ...
option (pds.options).package_size = 10; // 改动2:限制单包大小
}
```
[wtitleq sdk的proto实例](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/blob/master:proto/predictor_api.proto)
```bash
# predictions.conf文件
[@Predictor]
# ...
[.@VariantInfo]
#...
[..RpcParameter]
Protocol : itp # 改动3:修改rpc请求参数为itp协议
```
[wtitleq sdk的conf实例](http://icode.baidu.com/repos/baidu/infinite-inference/as-wtitleq-demo/blob/master:conf/predictors.conf)
# 框架简介
![图片](http://agroup-bos.cdn.bcebos.com/63a5076471e96a08124b89101e12c1a0ec7b642a)
- 基础框架:屏蔽一个RPC服务所需的所有元素,让用户只关注自己的业务算子的开发;
- 业务框架:基于Protobuf定制请求接口,基于有限DAG定制业务逻辑,并行化调度;
- 模型框架:CPU/FPGA/GPU等硬件异构,多模型间异步优先级调度,新引擎灵活扩展,配置化驱动;
- 用户接口:搭建服务=定义proto文件+实现/复用Op+撰写配置,支持sdk/http请求;
## 名词解释
- 预测引擎:对PaddlePaddle/Abacus/Tensorflow等各种推理计算Lib的封装,屏蔽预测模型动态Reload细节,对上层暴露统一的预测接口;
- 预测模型:由离线训练框架生成、在线预测引擎加载的数据文件或目录,以PaddleFluid模型为例,通常包括拓扑文件和参数文件;
- Op 算子:Paddle-serving对在线(预处理/后处理等)业务逻辑的最小粒度封装,框架提供OpWithChannel和OpWithChannelAndConf这两种常用的Op基类;框架默认实现通用Op算子;
- Node:由某个Op算子类结合参数配置组成的Op算子实例,也是Workflow中的一个执行单元;
- DAG/Workflow:由若干个相互依赖的Node组成,每个Node均可通过特定接口获得Request对象,节点Op通过依赖关系获得其前置Op的输出对象,最后一个Node的输出默认就是Response对象;
- Service:对一次pv的请求封装,可配置若干条Workflow,彼此之间复用当前PV的Request对象,然后各自并行/串行执行,最后将Response写入对应的输出slot中;一个Paddle-serving进程可配置多套Service接口,上游根据ServiceName决定当前访问的Service接口。
![图片](http://agroup-bos.cdn.bcebos.com/2e5e3cdcc9426d16e2090e64e7d33098ae5ad826)
## 主要功能
Paddle serving框架为策略工程师提供以下三层面的功能性扩展:
### 模型
- 预测引擎:集成PaddlePaddle、Abacus、Tensorrt、Anakin、Tensorflow等常用机器学习框架的预测Lib;
- 模型种类:支持PaddlePaddle(V1、V2、Fluid)、TensorrtUFF、Anakin、Tensorflow、Caffe等常见模型格式;
- 用户接口:支持模型加载、重载的配置化驱动,不同种类模型的预测接口完全一致;
- 模型调度:支持基于异步线程模型的多模型预估调度,实现异构资源的优先级调度;
### 业务
- 预测流程:通过有限DAG图描述一次预测从Request到Response的业务流程,节点Node是一个最小逻辑单元——OP;
- 预测逻辑:框架封装常用预处理、预测计算、后处理等常用OP,用户通过自定义OP算子实现特化处理逻辑;
### 服务
- RPC:底层通过Baidu-rpc封装网络交互,Server端可配置化启动多个独立Service,框架会搜集Service粒度的详细业务指标,并按照BVar接口对接到Noah等监控平台;
- SDK:基于Baidu-rpc的client进行封装,提供多下游连接管理、可扩展路由策略、可定制参数实验、自动分包等机制,支持同步、半同步、纯异步等交互模式,以及多种兼容协议,所有连接策略均通过配置驱动
# 平台简介
![图片](http://agroup-bos.cdn.bcebos.com/42a0e34a7c6b36976e3932639209fd823d8f25e0)
- [运维API](http://agroup.baidu.com/share/md/e582f543fb574e9b92445286955a976d)
- [预测API](http://agroup.baidu.com/share/md/eb91a51739514319844ceccdb331564c)
## 名词解释
- 用户(User):云平台注册用户,可基于平台Dashboard对账户下的端点信息进行增、删、查、改;
- 端点(Endpoint):对一个预测需求的逻辑抽象,通常包含一到多个服务变体,以方便多版本模型管理;
- 变体(Variant):一套同质化的Paddle-serving集群服务,每个实例起一个Paddle-serving进程;
- 实验(A/B Test):支持变体实验和参数化实验两种模式,变体实验根据Endpoint所属变体流量百分比实现流量随机抽样;参数化实验通过对pv绑定实验参数、由Paddle-serving进程解析参数、选择不同的代码分支进行实验;
## 主要功能
在公有云落地场景为Infinite(天衍)云平台,主要为策略工程师提供以下三方面的全流程托管:
- 统一接入代理:提供代理服务,通过zk和云平台实时同步元信息,支持多模型版本管理和A/B测试路由策略,提供统一入口和标准预测API;
- 自动化部署:对接K8S/Opera等常见PaaS部署平台,支持服务的一键部署、回滚、下线等运维操作,支持endpoint/variant/model等维度的资源管理;
- 可视化运维:对接console、notebook、dashboard等前端工具和页面,满足可视化运维需求;
# 设计文档
- [总体设计文档](http://agroup.baidu.com/paddleserving/view/office/895070)
- [框架详设文档](http://agroup.baidu.com:8964/static/a3/e40876e464ba08ae5de14aa7710cf326456751.pdf?filename=PaddleServing%E6%9C%8D%E5%8A%A1%E6%A1%86%E6%9E%B6%E8%AF%A6%E7%BB%86%E8%AE%BE%E8%AE%A1%E6%96%87%E6%A1%A3v0_1.pdf)
- [平台详设文档](http://agroup.baidu.com/share/office/042a0941579e49adb8c255c8b5e92d51)
# FAQ
1. 如何修改端口配置?
- 使用该框架搭建的服务需要申请一个端口,可以通过以下方式修改端口号:
- 如果在inferservice_file里指定了port:xxx,那么就去申请该端口号;
- 否则,如果在gflags.conf里指定了--port:xxx,那就去申请该端口号;
- 否则,使用程序里指定的默认端口号:8010。
2. 如何在部署的时候配置动态端口?
- 如果使用FCCI部署协议(凤巢检索端内部的部署协议),需要(1)通过inferservice_file指定端口号;(2)修改[Rakefile.opera](http://wiki.baidu.com/pages/viewpage.action?pageId=399979183#id-%E4%BB%8E%E9%9B%B6%E5%BC%80%E5%A7%8B%E5%86%99production-%E7%BC%96%E5%86%99Rakefile)的dynamic_port_config配置
- `@dynamic_port_config为动态端口配置,向Opera申请名为:name的动态端口,其端口号会被写到:conf文件中的:target配置项。`例子如下:
```
@dynamic_port_config = [
{:name => 'main', :conf => 'framework/service.conf', :target => 'port'}, // 部署时自动向Opera申请端口,服务将会监听这个端口
{:name => 'main', :conf => 'predictor_valid.conf', :target => 'port'}, // valid工具向这个端口发送测试请求,确保服务已正常启动
]
```
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "common/constant.h"
namespace baidu {
......@@ -16,16 +30,21 @@ DEFINE_string(logger_path, "./conf", "");
DEFINE_string(logger_file, "log.conf", "");
DEFINE_string(resource_path, "./conf", "");
DEFINE_string(resource_file, "resource.prototxt", "");
DEFINE_int32(max_concurrency, 0, "Limit of request processing in parallel, 0: unlimited");
DEFINE_int32(num_threads, 0, "Number of pthreads that server runs on, not change if this value <= 0");
DEFINE_int32(max_concurrency,
0,
"Limit of request processing in parallel, 0: unlimited");
DEFINE_int32(
num_threads,
0,
"Number of pthreads that server runs on, not change if this value <= 0");
DEFINE_int32(reload_interval_s, 10, "");
DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list");
const char* START_OP_NAME = "startup_op";
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Baidurpc
BAIDU_REGISTER_ERRNO(baidu::paddle_serving::predictor::ERR_INTERNAL_FAILURE,
......
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_CONSTANT_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_CONSTANT_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "common/inner_common.h"
namespace baidu {
......@@ -52,8 +63,6 @@ static const size_t MAX_WORKFLOW_NUM_IN_ONE_SERVICE = 20;
static const uint32_t DEFAULT_CACHE_CAPACITY = 10000;
static const uint32_t DEFAULT_CACHE_UNITSIZE = 8192;
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INNER_COMMON_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INNER_COMMON_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <error.h>
#include <getopt.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <strings.h>
#include <getopt.h>
#include <sys/types.h>
#include <unistd.h>
#include <typeinfo>
#include <google/protobuf/text_format.h>
#include <boost/unordered_map.hpp>
#include <boost/function.hpp>
#include <boost/algorithm/string.hpp> // for boost::split&trim
#include "boost/algorithm/string.hpp" // for boost::split&trim
#include "boost/function.hpp"
#include "boost/unordered_map.hpp"
#include "google/protobuf/text_format.h"
#include <gflags/gflags.h>
#include "gflags/gflags.h"
#include <butil/logging.h>
#include <butil/time.h>
#include <butil/object_pool.h>
#include <brpc/channel.h>
#include <brpc/server.h>
#include <brpc/policy/giano_authenticator.h>
#include <bthread/bthread.h>
#include <error.h>
#include "brpc/channel.h"
#include "brpc/policy/giano_authenticator.h"
#include "brpc/server.h"
#include "bthread/bthread.h"
#include "butil/logging.h"
#include "butil/object_pool.h"
#include "butil/time.h"
#include "server_configure.pb.h"
#include "configure_parser.h"
#include "configure/include/configure_parser.h"
#include "configure/server_configure.pb.h"
#include "common/utils.h"
#include "common/types.h"
#include "common/constant.h"
#endif
#include "common/types.h"
#include "common/utils.h"
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MACROS_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MACROS_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "common/inner_common.h"
namespace baidu {
......@@ -61,8 +73,6 @@ namespace predictor {
#endif
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
namespace baidu {
namespace paddle_serving {
namespace predictor {
......@@ -14,8 +26,6 @@ struct Sequence {
Size size;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_TYPES_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_UTILS_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_UTILS_H
#include "common/macros.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
#include "common/macros.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class TimerFlow {
public:
public:
static const int MAX_SIZE = 1024;
TimerFlow() {
init();
}
TimerFlow() { init(); }
void init() {
_csize = 0;
......@@ -23,15 +34,13 @@ public:
_auto = false;
}
TimerFlow(const char* name) : _csize(0), _name(name) {
explicit TimerFlow(const char* name) : _csize(0), _name(name) {
_last = _start = butil::cpuwide_time_us();
_auto = true;
_started = true;
}
void set_name(const char* name) {
_name = name;
}
void set_name(const char* name) { _name = name; }
void start() {
_last = _start = butil::cpuwide_time_us();
......@@ -45,10 +54,8 @@ public:
}
uint64_t now = butil::cpuwide_time_us();
if (!appendf("%s:%lu|", tag, now - _last)) {
LOG(WARNING)
<< "Failed check timer: " << _name
<< ", value = [" << tag << ":"
<< (now - _last) << "]!";
LOG(WARNING) << "Failed check timer: " << _name << ", value = [" << tag
<< ":" << (now - _last) << "]!";
return false;
}
......@@ -56,9 +63,7 @@ public:
return true;
}
std::string info() {
return std::string(_buf);
}
std::string info() { return std::string(_buf); }
void end() {
uint64_t now = butil::cpuwide_time_us();
......@@ -74,14 +79,13 @@ public:
}
uint64_t now = butil::cpuwide_time_us();
if (appendf("total:%lu,%s", now - _start, _name)) {
LOG(INFO)
<< " " << _name << "_tc=[" << _buf << "]";
LOG(INFO) << " " << _name << "_tc=[" << _buf << "]";
} else {
LOG(WARNING) << "Failed dump time_info[" << _name << "]";
}
}
private:
private:
bool appendf(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
......@@ -92,13 +96,14 @@ private:
return false;
}
_csize += bytes;
} CATCH_ANY_AND_RET(false);
}
CATCH_ANY_AND_RET(false);
va_end(ap);
return true;
}
private:
private:
char _buf[1024];
int _csize;
uint64_t _start;
......@@ -108,47 +113,37 @@ private:
bool _auto;
};
template<bool flag>
template <bool flag>
struct derived_from_message {};
template<typename T, typename TBase>
template <typename T, typename TBase>
class TIsDerivedFromB {
private:
static uint8_t check(TBase*) {
return 1;
}
private:
static uint8_t check(TBase*) { return 1; }
static uint32_t check(void*) {
return 0;
}
static uint32_t check(void*) { return 0; }
public:
public:
enum {
// function call cannot apprear in a constant-expression
RESULT = (sizeof(uint8_t) == sizeof(check((T*)(NULL)))),
RESULT = (sizeof(uint8_t) == sizeof(check(reinterpret_cast<T*>(NULL)))),
};
};
template<typename TBase>
template <typename TBase>
class IsDerivedFrom {
private:
static bool check(TBase*) {
return true;
}
private:
static bool check(TBase*) { return true; }
static bool check(void*) {
return false;
}
static bool check(void*) { return false; }
public:
template<typename T>
public:
template <typename T>
static bool yes(T* x) {
return check(x);
}
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#!/bin/bash
# Driver script for the CTS test suite: stops any running instance, selects
# the module to run, sizes the parallelism from free memory, runs main.py,
# then stops the environment again. Exits non-zero when main.py fails.

# Remember the directory the script was launched from.
start_path="$(pwd)"
sh build.sh stop
# Change into the directory containing this script (the cts directory).
cd "$(dirname "$0")"/
# If a --module_name=<mod> argument was passed, write it into
# install-all.conf as run_mod; otherwise default to lr_engine.
if [[ "x"$@ = x*--module_name=* ]]
then
all_arg=$@
tmp=${all_arg##*--module_name=}
mod_name=${tmp%% *}
sed -i "/^run_mod=/s/run_mod.*/run_mod=$mod_name/" install-all.conf
else
sed -i "/^run_mod=/s/run_mod.*/run_mod=lr_engine/" install-all.conf
fi
# Parallel environment count configured by build.sh in install-all.conf.
env_num=`grep env_num install-all.conf | awk -F '=' '{print $2}'`
# Put the bundled python27 first on PATH and expose this dir on PYTHONPATH.
export PATH="$(pwd)"/frame/tools/python27/bin:$PATH
export PYTHONPATH="$(pwd)"
# Drop any cp alias (e.g. "cp -i") so copies performed later never prompt.
alias | grep "alias cp=" >/dev/null
if [ $? -eq 0 ];then
unalias cp
fi
# Return to the launch directory before executing main.py.
cd "$start_path"
# Allow roughly one worker per 5000 MB of free memory (row 3 of `free -m`).
mem_free=`free -m | awk '{print $4}'| head -3 | awk 'END{print}'`
let thread_max=$mem_free/5000
if [ $thread_max -eq 0 ];then
echo "系统内存不足, 不能运行任何case"
exit 1
fi
# Cap the configured environment count by what memory allows.
if [ $thread_max -lt $env_num ];then
env_num=$thread_max
echo "目前系统内存最多支持运行$env_num个线程"
fi
temp_args="--paral=$env_num"
python "$(dirname "$0")"/control/main.py $temp_args $@
ret=$?
sh build.sh stop
# Propagate main.py failure to the caller.
if [ $ret -ne 0 ]
then
exit 1
fi
#!/bin/bash
# cfont - colored console output helper.
# Walks its arguments left to right: -<color> switches the current ANSI
# color, -b/-t/-n emit a space/tab/newline, -h prints usage, and any other
# argument is printed literally in the current color. Terminal attributes
# are always reset before the function returns.
# Example: cfont -blue "building..." -n
function cfont()
{
while (($#!=0))
do
case $1 in
-b)
echo -ne " ";
;;
-t)
echo -ne "\t";
;;
-n)
echo -ne "\n";
;;
-black)
echo -ne "\033[30m";
;;
-red)
echo -ne "\033[31m";
echo -ne "\033[1m";
;;
-green)
echo -ne "\033[32m";
echo -ne "\033[1m";
;;
-yellow)
echo -ne "\033[33m";
;;
-blue)
echo -ne "\033[34m";
echo -ne "\033[1m";
;;
-purple)
echo -ne "\033[35m";
;;
-cyan)
echo -ne "\033[36m";
echo -ne "\033[1m";
;;
-white|-gray)
echo -ne "\033[37m";
;;
-reset)
echo -ne "\033[0m";
;;
-h|-help|--help)
echo "Usage: cfont -color1 message1 -color2 message2 ...";
echo "eg: cfont -red [ -blue message1 message2 -red ]";
;;
*)
echo -ne "$1"
;;
esac
shift
done
# Always restore default terminal attributes on exit.
echo -ne "\033[0m";
}
# Locate the repo root (everything above the first /baidu/ component) and
# make sure the shared fengchao-qa/citools helper library is available,
# cloning it on first use, then load localbuild_lib.sh from it.
cur_path=`pwd`
work_root=${cur_path%%/baidu/*}
CITOOLS="${work_root}/baidu/fengchao-qa/citools"
if [ ! -e ${CITOOLS}/lib/localbuild_lib.sh ];then
cfont -blue "=============== localbuild_lib.sh is not exist, downloading ...================" -n
git clone ssh://git@icode.baidu.com:8235/baidu/fengchao-qa/citools $CITOOLS >/dev/null
fi
source ${CITOOLS}/lib/localbuild_lib.sh
# get_framework_baseenv
# Download the pdserving framework XTS base environment from the internal
# FTP mirror into the run_path configured in ./install-all.conf, attempting
# the download up to 4 times (1 try + 3 retries).
# Returns 0 on success, 1 when run_path is unusable or every attempt fails.
function get_framework_baseenv()
{
onlineFtp="ftp://tc-orp-app2.tc.baidu.com/home/heqing"
# NOTE: wgetOptions is intentionally left unquoted at the call site so the
# individual flags undergo word splitting.
wgetOptions="--tries=3 --retry-connrefused -r -l0 -nv --limit-rate=50m -nH"
cfont -blue "##################################################" -n ;
cfont -blue "### build pdserving_framework xts base env ###" -n ;
cfont -blue "##################################################" -n ;
cfont -reset;
# run_path comes from the generated install-all.conf (key=value format).
run_path="$(grep "run_path" "./install-all.conf" | cut -d "=" -f 2)"
# Quote the path and fail fast when the directory does not exist
# (original used an unquoted cd and continued on failure).
cd "$run_path" || return 1
# Single download loop replaces the duplicated wget invocation that the
# original used for the first attempt plus its retry loop.
ret=1
for attempt in 1 2 3 4; do
wget $wgetOptions --cut-dirs=4 "$onlineFtp"/scmbak/pdserving/framework_tester -o wget.log
ret=$?
[[ $ret -eq 0 ]] && break
done
[[ $ret -ne 0 ]] && return 1
cfont -blue "[XTS] " -green "[ finish download: pdserving-framework ]" -n
cd -
return 0
}
# Build the CTS (test-suite) environment.
# build_ctsenv [env_num]
# Creates install-all.conf, lays out run_env/ with the predictor binary,
# libraries and configuration, optionally replicates it env_num times for
# parallel runs, then installs the XTS framework and protobuf tooling.
function build_ctsenv()
{
# Number of parallel environments; defaults to 0 (single environment).
if [ -z $1 ]; then
ENV_NUM=0
else
ENV_NUM=$1
fi

# Refresh the installation settings for this host/user.
hostname=$(uname -n)
username="$(echo "`whoami`" | awk '{print $1}')"
LIBPATH=${PWD}/lib
echo "libpath is : $LIBPATH"

# Generate install-all.conf consumed by the rest of the tooling.
{
echo "[config]"
echo "host=$hostname"
echo "user=$username"
echo "passwd=CAPHI2008"
echo "env_file=${PWD}/envfile"
echo "lib_path=$LIBPATH"
echo "run_path=${PWD}/run_env"
echo "env_num=$ENV_NUM"
} > ./install-all.conf

# Install the predictor runtime environment under run_env/.
{
cfont -blue "============= predictor env install =============" -n
rm -rf run_env && mkdir -p run_env
echo "current path is :${cur_path}"
#get_framework_baseenv
#if [ $? -ne 0 ]; then
# echo "pdserving-framework is not ready!!!"
# exit 1
#fi
mkdir -p run_env/predictor/bin
mkdir -p run_env/predictor/conf
# Copy the locally built pdserving binary, libs and conf into the env.
[[ -e ../output/bin/pdserving ]] && cp -rf ../output/bin/pdserving run_env/predictor/bin/predictor
[[ -e ../output/lib ]] && cp -rf ../output/lib/ run_env/predictor/
[[ -e ../conf ]] && cp -rf ../conf/* run_env/predictor/conf/
# Replicate the environment for parallel execution when ENV_NUM > 0.
if [ $ENV_NUM -ne 0 ]; then
cfont -blue "=============== build multi env ===============" -n
mkdir -p ${PWD}/run_env/1
mv -f ${PWD}/run_env/framework_tester ${PWD}/run_env/1/framework_tester
mv -f ${PWD}/run_env/model ${PWD}/run_env/1/model
mv -f ${PWD}/run_env/dict ${PWD}/run_env/1/dict
for ((i=2; i<=$ENV_NUM; i=i+1))
do
cp -rf ${PWD}/run_env/1 ${PWD}/run_env/$i
done
fi
}

# Install the XTS framework itself (frame/control/commonlib from SVN).
{
echo "now pwd is :`pwd`"
cfont -blue "=============== XTS(cts) install ================" -n
svn co https://svn.baidu.com/general-test/trunk/xts/frame frame> /dev/null
svn co https://svn.baidu.com/general-test/trunk/xts/im/core/control control>/dev/null
echo "now dir list is :`ls`"
cd lib
svn co https://svn.baidu.com/general-test/trunk/xts/im/core/lib/commonlib commonlib>/dev/null
cd -
}
cfont -blue "[XTS] " -green "[ finish XTS(cts) install ]" -n
onlineFtp="ftp://tc-orp-app2.tc.baidu.com/home/heqing"
wgetOptions="--tries=3 --retry-connrefused -r -l0 -nv --limit-rate=50m -nH"

# Install bidinfo and the base protolib / pluginlib from the FTP mirror.
{
cd lib
[[ -e bidinfo ]] && rm -rf bidinfo
[[ -e protolib ]] && rm -rf protolib
[[ -e pluginlib ]] && rm -rf pluginlib
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/bidinfo -o wget.log
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/protolib -o wget.log
wget $wgetOptions --cut-dirs=6 "$onlineFtp"/scmbak/common_lib/pdserving_cts/framework/pluginlib -o wget.log
cd -
}

# Install protoc 2.4.1 and regenerate the python protolib from ../proto.
{
cfont -blue "============== protoc install ==================" -n
[[ -e protoc_tools ]] && rm -rf protoc_tools
wget $wgetOptions --cut-dirs=5 "$onlineFtp"/scmbak/common_lib/pdserving_cts/protoc_tools -o wget.log
[[ -e ../proto ]] && cp -rf ../proto/* ./protoc_tools/proto/
cd protoc_tools
chmod +x ./protobuf-2.4.1/bin/protoc
chmod +x ./protobuf-2.4.1/lib/*
[[ -e protolib ]] && rm -rf protolib
mkdir ./protolib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:`pwd`/protobuf-2.4.1/lib
./protobuf-2.4.1/bin/protoc -I=./proto --python_out=./protolib/ ./proto/*.proto
cd -
cp ./protoc_tools/protolib/*.py ./lib/protolib/
}
cfont -reset
return 0
}
# get_pid <prog> <user> <prog_path>
# Print the PID of every process named <prog> owned by <user> whose running
# executable (a) resolves to <prog_path>, (b) no longer exists on disk, or
# (c) lives under the user's .__trash directory.
# Returns 0 when at least one PID was printed, non-zero otherwise.
function get_pid
{
local prog=$1
local user=$2
local prog_path=$3
# Use 1 for "nothing found": the original used -1, which the shell
# reports as 255 anyway; callers only test for non-zero.
local ret=1
local trash_path="/home/$(echo "`whoami`" | awk '{print $1}')/.__trash/"
# Localize loop state; the original leaked pids/pid/tmp_path globally.
local pids pid tmp_path
pids=$(pgrep "$prog" -u "$user")
for pid in $pids
do
# Resolve the on-disk path of the running binary; empty/garbage when
# the process exited between pgrep and this read.
tmp_path=$(ls -l /proc/$pid/exe 2>/dev/null | awk '{print $NF}')
# Replaced the convoluted `[ 0 == \`...; echo $?\` ]` construct with a
# direct grep status test; quoting guards against empty paths.
if [ "$tmp_path" == "$prog_path" ] || [ ! -e "$tmp_path" ] || echo "$tmp_path" | grep -qs "$trash_path"
then
echo $pid
ret=0
fi
done
return $ret
}
# kill_prog <name> <user> <prog_path>
# Force-kill (SIGKILL) every process matched by get_pid for the given
# name/user/path triple. No-op when nothing matches.
function kill_prog()
{
name=$1
username=$2
prog_path=$3
pids=`get_pid $name $username $prog_path`
# The original tested `$?` right after an `echo`, which always succeeds,
# making the guard dead code. Test the PID list itself instead.
if [ -n "$pids" ] ;then
for pid in $pids
do
kill -9 $pid
done
fi
}
# kill_predictor_prog
# Kill the predictor process of the base run_env plus each of the numbered
# parallel environments listed in install-all.conf (env_num). When the conf
# file is absent only the base environment (index 0) is handled.
function kill_predictor_prog()
{
username="$(echo "`whoami`" | awk '{print $1}')"
if [ -f install-all.conf ]
then
env_num=`grep env_num= install-all.conf|awk -F '=' '{print $2}'`
else
env_num=0
fi
# Index 0 is the base environment; 1..env_num are the parallel copies.
for ((i=0; i<=$env_num; i=i+1))
do
if [ $i -eq 0 ]
then
run_path="${PWD}/run_env"
else
run_path="${PWD}/run_env/$i"
fi
kill_prog predictor $username $run_path/framework_tester/bin/predictor
done
}
# clean_ctsenv
# Remove every artifact produced by build_ctsenv so the workspace is
# pristine again. Missing entries are ignored by rm -rf; always returns 0.
function clean_ctsenv()
{
local artifact
for artifact in install-all.conf ccover run_env fail_env output log frame control lib/commonlib lib/protolib
do
rm -rf "$artifact"
done
return 0
}
# Entry dispatch:
#   build.sh clean      -> remove the environment and exit
#   build.sh stop       -> kill running predictor processes and exit
#   build.sh [env_num]  -> rebuild the environment from scratch
if [ $# -eq 1 ] && [ $1 == "clean" ]
then
clean_ctsenv
exit 0
fi

if [ $# -eq 1 ] && [ $1 == "stop" ]
then
kill_predictor_prog
exit 0
fi

# Default: clean then rebuild, propagating build_ctsenv's status.
clean_ctsenv
build_ctsenv "$1"
exit $?
#!/usr/bin/env python
# -*- coding:gbk -*-
"""
case created by templete
"""
import sys
sys.path.append(r'./lib/protolib')
print("sys path is : %s " % str(sys.path))
import os
import json
import commands
from lib.protolib.dense_service_pb2 import Request
from lib.protolib.dense_service_pb2 import Response
from lib.pluginlib.plugin_util import Util as ut
from lib.pluginlib.plugin_case import PluginCase
from lib.pluginlib.plugin_module import PluginModule
from lib.pluginlib.plugin_apistub import ApiStub
class TestDenseService(PluginCase):
    """XTS case for the built-in dense-format prediction service.

    Builds a dense-feature Request protobuf, converts it to JSON and sends
    it to the "BuiltinDenseFormatService" endpoint through the plugin test
    harness exposed as ``self.t``; verification is handled by the harness.
    """
    # Case metadata consumed by the XTS scheduling framework.
    OWNER="zhangwenbo03"
    # Suite-selection lists: which cases run at each suite level.
    quick=['ALL']
    low=[]
    daily=[]
    ignorelist=[]
    # Restart the service under test before running the cases.
    RESTART=True

    def setUp(self):
        """Hook run before each case; nothing to prepare here."""
        pass

    def tearDown(self):
        """Stop the service harness after each case."""
        self.t.stop()
        print "stop finished"
        pass

    def testDemoCase(self):
        """Send one dense request (3 features) through the debug channel."""
        req = Request()
        denseIns = req.instances.add()
        denseIns.features.append(10)
        denseIns.features.append(13)
        denseIns.features.append(200)
        service = "BuiltinDenseFormatService"
        type = "debug"
        ut_obj = ut()
        # protobuf -> dict -> JSON, the format sendJsonData expects.
        dict_val = ut_obj.pb2dict(req)
        json_val = ut_obj.dict2json(dict_val)
        self.t.restart()
        self.t.tester.sendJsonData(json_val, service, type)
        print "execute demo case"
"""plugin register """
from lib.plugin_tester import *
#!/usr/bin/env python
# -*- coding:gbk -*-
"""
注册类:RegxxxConfData,RegxxxReq,RegxxxXbox,RegxxxAd,xxx为组件名
"""
from lib.pluginlib.plugin_common import ConfData
from lib.pluginlib.plugin_common import TreeConfData
from lib.pluginlib.plugin_common import CommonIndex
class RegpredictorConfData(object):
    """Registers the conf and data files of the wtitleq component.

    Exposes ``conf`` (ConfData handles keyed by alias) and ``data``
    (dictionary indexes keyed by model alias), both rooted at ``path``.
    """

    def __init__(self, path):
        # Root directory holding the component's conf/ and data/ trees.
        self.path = path
        # Config handles, keyed by config alias.
        self.conf = {
            'ub': ConfData(path=path + "/conf/ub.conf", connect_flag=":"),
        }
        # Signed dictionary indexes, keyed by model alias.
        self.data = {
            'lr_model': CommonIndex(
                path=path + '/data/lr-model/wtitleq_model_file.sign',
                col_list=['key', 'value'],
                format='B'),
        }
class RegpredictorReq(object):
    """Registers the default request payload of the wtitleq component.

    Exposes ``plugin_term`` (request term definitions keyed by command tag)
    and ``plugin_list`` (the registered command tags).
    """

    def __init__(self):
        # Column names of the per-ad key/value pairs sent with a request.
        pair_schema = ['query',
                       'wadptid',
                       'wbwsid',
                       'omit_buf',
                       'title',
                       'desc',
                       'cmatch',
                       'bidword',
                       'dynamic_new_title']
        # Sample values aligned one-to-one with pair_schema (URL-encoded).
        pair_value = ['鲜花',
                      '0',
                      '3',
                      '鲜花',
                      '鲜花%2C本地实体鲜花店100%25保证%21',
                      '鲜花品质100%25%2C主城最快2小时送到%2C全天24时在线订花%21市区内免费送花上门%21鲜%2E%2E',
                      '223',
                      '鲜花',
                      '美丽鲜花']
        # A single command tag is registered, targeting the wise CTR path.
        self.plugin_term = {
            'cmd_tag0': {"query_schema": [],
                         "pair_schema": pair_schema,
                         "query_value": [],
                         "pair_value": pair_value,
                         "cmd": '/titleq/wise/ctr'},
        }
        self.plugin_list = self.plugin_term.keys()
class RegpredictorNewXbox(object):
    """Registers the xbox stub configuration of the wtitleq component."""

    def __init__(self):
        # This component requires an xbox stub to be started.
        self.need_xbox = True
        # Name and config file of the stub process.
        self.stub_name = 'xboxstub'
        self.stub_conf = 'xboxstub.conf'
        # Per-stub configuration files to load.
        self.conf_list = ['xbox-wtitleq_pegasus.conf']
class RegpredictorAd(object):
    """Registers whether the wtitleq component needs a mocked ad library."""

    def __init__(self):
        # No ad-library stub is required for this component.
        self.need_adstub = False
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <butil/atomicops.h>
#include <errno.h>
#include <vector>
#include <algorithm>
#include <deque>
#include <butil/atomicops.h>
#include <vector>
#include "common/inner_common.h"
#include "framework/infer_data.h"
#include "framework/memory.h"
......@@ -13,12 +28,12 @@
namespace im {
namespace bsf {
template<>
template <>
struct Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> {
typedef Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> TaskT;
baidu::paddle_serving::predictor::Tensor>
TaskT;
typedef baidu::paddle_serving::predictor::Tensor Tensor;
typedef baidu::paddle_serving::predictor::Tensor InType;
typedef baidu::paddle_serving::predictor::Tensor OutType;
......@@ -27,8 +42,7 @@ struct Task<baidu::paddle_serving::predictor::Tensor,
typedef baidu::paddle_serving::predictor::BatchTensor OutArrayT;
struct Segment {
Segment(void* p, size_t b, size_t s)
: ptr(p), begin(b), size(s) {}
Segment(void* p, size_t b, size_t s) : ptr(p), begin(b), size(s) {}
void* ptr;
size_t begin;
size_t size;
......@@ -75,11 +89,10 @@ struct Task<baidu::paddle_serving::predictor::Tensor,
}
};
template<>
class BatchTasks<Task<
baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> > {
public:
template <>
class BatchTasks<Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor>> {
public:
typedef baidu::paddle_serving::predictor::Tensor Tensor;
typedef baidu::paddle_serving::predictor::Tensor InType;
typedef baidu::paddle_serving::predictor::Tensor OutType;
......@@ -87,15 +100,16 @@ public:
typedef baidu::paddle_serving::predictor::MempoolWrapper MempoolWrapper;
typedef Task<baidu::paddle_serving::predictor::Tensor,
baidu::paddle_serving::predictor::Tensor> TaskT;
baidu::paddle_serving::predictor::Tensor>
TaskT;
typedef TaskMeta<TaskT> TaskMetaT;
typedef TaskT::InArrayT InArrayT;
typedef TaskT::OutArrayT OutArrayT;
BatchTasks(size_t batch_size, bool batch_align = false)
: _batch_size(batch_size)
, _rem_size(batch_size)
, _batch_align(batch_align) {
explicit BatchTasks(size_t batch_size, bool batch_align = false)
: _batch_size(batch_size),
_rem_size(batch_size),
_batch_align(batch_align) {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
......@@ -107,8 +121,9 @@ public:
_tasks.clear();
}
static bool check_valid(
const InArrayT& in, OutArrayT& out, bool align) {
static bool check_valid(const InArrayT& in,
OutArrayT& out, // NOLINT
bool align) { // NOLINT
if (align) {
if (out.count() <= 0 || out.size() <= 0) {
LOG(ERROR) << "Out tensor is empty, when aligned";
......@@ -116,7 +131,8 @@ public:
}
if (out.size() != in.size()) {
LOG(ERROR) << "In/Out tensor size not eq: " << out.size() << "!=" << in.size();
LOG(ERROR) << "In/Out tensor size not eq: " << out.size()
<< "!=" << in.size();
return false;
}
......@@ -127,7 +143,8 @@ public:
}
if (out.size() != out[fi].shape0()) {
LOG(ERROR) << "Shape0 not consistency, " << out.size() << "!=" << out[fi].shape0() << ", " << fi;
LOG(ERROR) << "Shape0 not consistency, " << out.size()
<< "!=" << out[fi].shape0() << ", " << fi;
return false;
}
}
......@@ -156,13 +173,13 @@ public:
void merge_input() {
if (_tasks.size() <= 0 || _tasks[0].task->in->count() <= 0) {
return ;
return;
}
if (_tasks.size() == 1 && !_batch_align) {
TaskMetaT& tm = _tasks[0];
_batch_in = *(tm.task->in);
return ;
return;
}
merge_tensor(true);
......@@ -171,30 +188,30 @@ public:
void merge_output() {
if (_batch_align) {
if (_tasks.size() <= 0 || _tasks[0].task->out->count() <= 0) {
return ;
return;
}
}
if (_tasks.size() <= 0 || _tasks[0].task->out->count() <= 0) {
return ;
return;
}
TaskMetaT& tm = _tasks[0];
if (_tasks.size() == 1 && !_batch_align) {
_batch_out = *(tm.task->out);
return ;
return;
}
if (tm.task->out->size() <= 0) {
// shape is empty
_batch_out = *(tm.task->out);
return ;
return;
}
if ((*tm.task->out)[0].data.data() == 0
|| (*tm.task->out)[0].data.size() == 0) {
if ((*tm.task->out)[0].data.data() == 0 ||
(*tm.task->out)[0].data.size() == 0) {
_batch_out = *(tm.task->out);
return ;
return;
}
merge_tensor(false);
......@@ -228,11 +245,10 @@ public:
size_t ins_byte = ins_ele_count * head.ele_byte();
size_t tensor_byte = tensor_ele_count * head.ele_byte();
void* data_buf
= MempoolWrapper::instance().malloc(tensor_byte);
void* data_buf = MempoolWrapper::instance().malloc(tensor_byte);
if (!data_buf) {
LOG(ERROR) << "Malloc failed, size: " << tensor_byte;
return ;
return;
}
size_t data_byte = 0;
......@@ -240,23 +256,27 @@ public:
TaskMetaT& tm = _tasks[ti];
size_t acc_byte = ins_byte * (tm.end - tm.begin);
if (data_byte + acc_byte > tensor_byte) {
LOG(ERROR) << "Invalid bytes: " << data_byte << " + " << acc_byte << " >= " << tensor_byte;
return ;
LOG(ERROR) << "Invalid bytes: " << data_byte << " + " << acc_byte
<< " >= " << tensor_byte;
return;
}
const Tensor& tensor = (*(tm.task->get(is_in)))[fi];
memcpy((char *)data_buf + data_byte,
(char *)(tensor.data.data()) + tm.begin * ins_byte,
memcpy(
reinterpret_cast<char*>(data_buf) + data_byte,
reinterpret_cast<char*>(tensor.data.data()) + tm.begin * ins_byte,
acc_byte);
data_byte += acc_byte;
}
if (data_byte != tensor_byte) {
LOG(ERROR) << "Invalid tensor byte: " << data_byte << " != " << tensor_byte;
return ;
LOG(ERROR) << "Invalid tensor byte: " << data_byte
<< " != " << tensor_byte;
return;
}
batch_tensor.data = DataBuf((char *)data_buf, tensor_byte);
batch_tensor.data =
DataBuf(reinterpret_cast<char*>(data_buf), tensor_byte);
if (is_in) {
_batch_in.push_back(batch_tensor);
} else {
......@@ -264,14 +284,15 @@ public:
}
}
LOG(INFO) << "merge input(" << is_in << ") samples: "
<< batch_size << " from " << _tasks.size() << " pvs";
LOG(INFO) << "merge input(" << is_in << ") samples: " << batch_size
<< " from " << _tasks.size() << " pvs";
}
void notify_tasks() {
if (_batch_out.size() != _batch_in.size()) {
LOG(ERROR) << "batch size not consistency: " << _batch_out.size() << " != " << _batch_in.size();
return ;
LOG(ERROR) << "batch size not consistency: " << _batch_out.size()
<< " != " << _batch_in.size();
return;
}
size_t tensor_count = _batch_out.count();
......@@ -283,8 +304,8 @@ public:
ins_byte *= tensor.shape[si];
}
for (size_t ti = 0, bi = 0, add = 0;
ti < _tasks.size(); ++ti, bi += add) {
for (size_t ti = 0, bi = 0, add = 0; ti < _tasks.size();
++ti, bi += add) {
OutArrayT* dst = _tasks[ti].task->out;
add = _tasks[ti].end - _tasks[ti].begin;
size_t offset_src = ins_byte * bi;
......@@ -293,8 +314,10 @@ public:
if (_batch_align) { // merge all batchs
size_t offset_dst = ins_byte * _tasks[ti].begin;
void* ptr = const_cast<void*>((*dst)[fi].data.data());
memcpy((char *)ptr + offset_dst,
(char *)(_batch_out[fi].data.data()) + offset_src, add_byte);
memcpy(
reinterpret_cast<char*>(ptr) + offset_dst,
reinterpret_cast<char*>(_batch_out[fi].data.data()) + offset_src,
add_byte);
} else { // overwrite
if (dst->count() <= 0) {
dst->push_back(_batch_out[fi]);
......@@ -304,7 +327,8 @@ public:
(*dst)[fi].shape[0] = add;
(*dst)[fi].data = DataBuf(
(char *)(_batch_out[fi].data.data()) + offset_src, add_byte);
reinterpret_cast<char*>(_batch_out[fi].data.data()) + offset_src,
add_byte);
}
}
}
......@@ -319,26 +343,19 @@ public:
if ((index + add) >= task->in->size()) {
char c = 0;
while (write(task->write_fd, &c, 1) != 1 && errno == EINTR) {
;
}
butil::return_object(task);
}
}
}
const typename TaskT::InArrayT& in() const {
return _batch_in;
}
const typename TaskT::InArrayT& in() const { return _batch_in; }
typename TaskT::OutArrayT& out() {
return _batch_out;
}
typename TaskT::OutArrayT& out() { return _batch_out; }
size_t task_size() {
return _tasks.size();
}
size_t task_size() { return _tasks.size(); }
private:
private:
std::vector<TaskMetaT> _tasks;
InArrayT _batch_in;
OutArrayT _batch_out;
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <boost/bind.hpp>
#include <butil/atomicops.h>
#include <sys/syscall.h>
#include <boost/bind.hpp>
#include "common/inner_common.h"
#include <sys/syscall.h>
namespace im {
namespace bsf {
template<typename TaskT>
template <typename TaskT>
void* TaskExecutor<TaskT>::thread_entry(void* args) {
ThreadContext<TaskT>* context = static_cast<ThreadContext<TaskT>*>(args);
TaskExecutor<TaskT>* executor = static_cast<TaskExecutor<TaskT>*>(context->executor);
TaskExecutor<TaskT>* executor =
static_cast<TaskExecutor<TaskT>*>(context->executor);
executor->work(context);
return NULL;
}
template<typename TaskT>
template <typename TaskT>
int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
_stop = false;
if (!_thread_contexts.empty()) {
......@@ -42,7 +56,9 @@ int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
int rc = THREAD_CREATE(
&contexts[i].tid, NULL, &TaskExecutor::thread_entry, &contexts[i]);
if (rc != 0) {
LOG(ERROR) << "failed to create BSF worker thread: index=" << i << ", rc=" << rc << ", errno=" << errno << ":" << strerror(errno);
LOG(ERROR) << "failed to create BSF worker thread: index=" << i
<< ", rc=" << rc << ", errno=" << errno << ":"
<< strerror(errno);
return -1;
}
......@@ -90,7 +106,7 @@ int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
return -1;
}
template<typename TaskT>
template <typename TaskT>
void TaskExecutor<TaskT>::stop() {
_stop = true;
for (size_t i = 0; i < _thread_contexts.size(); ++i) {
......@@ -103,9 +119,9 @@ void TaskExecutor<TaskT>::stop() {
_thread_contexts.clear();
}
template<typename TaskT>
TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(
const InArrayT& in, OutArrayT& out) {
template <typename TaskT>
TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(const InArrayT& in,
OutArrayT& out) { // NOLINT
TaskT* task = butil::get_object<TaskT>();
if (!task) {
LOG(ERROR) << "Failed get TaskT from object pool";
......@@ -120,7 +136,8 @@ TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(
int fds[2];
int rc = pipe(fds);
if (rc != 0) {
LOG(ERROR) << "call pipe() failed, errno=" << errno << ":" << strerror(errno);
LOG(ERROR) << "call pipe() failed, errno=" << errno << ":"
<< strerror(errno);
return TaskHandler<TaskT>::valid_handle();
}
......@@ -141,8 +158,8 @@ TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(
return TaskHandler<TaskT>(*task);
}
template<typename TaskT>
bool TaskExecutor<TaskT>::fetch_batch(BatchTasks<TaskT>& batch) {
template <typename TaskT>
bool TaskExecutor<TaskT>::fetch_batch(BatchTasks<TaskT>& batch) { // NOLINT
AutoMutex lock(_mut);
while (_task_queue.empty()) {
THREAD_COND_WAIT(&_cond, &_mut);
......@@ -165,7 +182,7 @@ bool TaskExecutor<TaskT>::fetch_batch(BatchTasks<TaskT>& batch) {
return true;
}
template<typename TaskT>
template <typename TaskT>
int TaskExecutor<TaskT>::work(ThreadContext<TaskT>* context) {
if (_thread_init_fn != NULL) {
if (_thread_init_fn(context->user_thread_context) != 0) {
......@@ -196,9 +213,9 @@ int TaskExecutor<TaskT>::work(ThreadContext<TaskT>* context) {
return 0;
}
template<typename InItemT, typename OutItemT>
template <typename InItemT, typename OutItemT>
bool TaskManager<InItemT, OutItemT>::schedule(const InArrayT& in,
OutArrayT& out) {
OutArrayT& out) { // NOLINT
TaskHandler<TaskT> handler = _executor.schedule(in, out);
if (handler.valid()) {
......@@ -210,12 +227,11 @@ bool TaskManager<InItemT, OutItemT>::schedule(const InArrayT& in,
}
}
template<typename InItemT, typename OutItemT>
template <typename InItemT, typename OutItemT>
void TaskManager<InItemT, OutItemT>::wait() {
char buffer[128];
while (read(_task_owned.read_fd, buffer, sizeof(buffer)) < 0
&& errno == EINTR) {
;
while (read(_task_owned.read_fd, buffer, sizeof(buffer)) < 0 &&
errno == EINTR) {
}
close(_task_owned.read_fd);
......@@ -225,6 +241,5 @@ void TaskManager<InItemT, OutItemT>::wait() {
_task_owned.write_fd = -1;
return;
}
}
}
} // namespace bsf
} // namespace im
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <errno.h>
#include <vector>
#include <algorithm>
#include <deque>
#include <butil/atomicops.h>
#include <vector>
#include "butil/atomicops.h"
#include "common/inner_common.h"
#include <boost/function.hpp>
#include "boost/function.hpp"
namespace im {
namespace bsf {
static const size_t DEFAULT_BATCH_SIZE = 100;
template<typename InItemT, typename OutItemT>
template <typename InItemT, typename OutItemT>
struct Task {
typedef std::vector<InItemT> InArrayT;
typedef std::vector<OutItemT> OutArrayT;
......@@ -33,9 +47,7 @@ struct Task {
size_t rem;
size_t size;
size_t batch_size() {
return in->size();
}
size_t batch_size() { return in->size(); }
butil::atomic<size_t> index;
......@@ -51,29 +63,27 @@ struct Task {
}
};
// Describes the slice [begin, end) of a task's samples that was assigned
// to one merged batch. `end = start + add`, so `end - begin` is the number
// of samples this slice contributes.
//
// NOTE(review): this span contained both the old multi-line and the new
// single-line ctor-initializer from a diff export (duplicate lines);
// resolved to the post-change version.
template <typename TaskT>
struct TaskMeta {
  TaskMeta(TaskT* ptr, size_t start, size_t add)
      : task(ptr), begin(start), end(start + add) {}

  TaskT* task;   // the task this slice belongs to (not owned)
  size_t begin;  // first sample index, inclusive
  size_t end;    // one past the last sample index
};
template<typename TaskT>
template <typename TaskT>
class BatchTasks {
public:
public:
typedef typename TaskT::InType InType;
typedef typename TaskT::OutType OutType;
typedef TaskMeta<TaskT> TaskMetaT;
BatchTasks(size_t batch_size, bool batch_align = true)
: _batch_size(batch_size)
, _rem_size(batch_size)
, _batch_align(batch_align) {
explicit BatchTasks(size_t batch_size, bool batch_align = true)
: _batch_size(batch_size),
_rem_size(batch_size),
_batch_align(batch_align) {
_batch_in.clear();
_batch_out.clear();
_tasks.clear();
......@@ -100,9 +110,9 @@ public:
return _rem_size;
}
static bool check_valid(
const typename TaskT::InArrayT& in,
typename TaskT::OutArrayT& out, bool align) {
static bool check_valid(const typename TaskT::InArrayT& in,
const typename TaskT::OutArrayT& out,
bool align) {
(void)in;
(void)out;
(void)align;
......@@ -121,8 +131,9 @@ public:
void notify_tasks() {
if (_batch_out.size() != _batch_in.size()) {
LOG(ERROR) << "batch size not consistency: " << _batch_out.size() << " != " << _batch_in.size();
return ;
LOG(ERROR) << "batch size not consistency: " << _batch_out.size()
<< " != " << _batch_in.size();
return;
}
for (size_t ti = 0, bi = 0; ti < _tasks.size(); ++ti) {
......@@ -133,8 +144,9 @@ public:
for (size_t oi = begin; oi < end; ++oi, ++bi) {
if (bi >= _batch_in.size()) {
LOG(ERROR) << "batch index overflow: " << bi << " > " <<_batch_in.size();
return ;
LOG(ERROR) << "batch index overflow: " << bi << " > "
<< _batch_in.size();
return;
}
(*task->out)[oi] = _batch_out[bi];
}
......@@ -143,26 +155,19 @@ public:
if ((index + add) >= task->in->size()) {
char c = 0;
while (write(task->write_fd, &c, 1) != 1 && errno == EINTR) {
;
}
butil::return_object(task);
}
}
}
const typename TaskT::InArrayT& in() const {
return _batch_in;
}
const typename TaskT::InArrayT& in() const { return _batch_in; }
typename TaskT::OutArrayT& out() {
return _batch_out;
}
typename TaskT::OutArrayT& out() { return _batch_out; }
size_t task_size() {
return _tasks.size();
}
size_t task_size() { return _tasks.size(); }
private:
private:
std::vector<TaskMetaT> _tasks;
typename TaskT::InArrayT _batch_in;
typename TaskT::OutArrayT _batch_out;
......@@ -171,26 +176,22 @@ private:
bool _batch_align;
};
// BSF 任务句柄, 用来等待时指定任务列表
template<typename TaskT>
// BSF task handle
template <typename TaskT>
struct TaskHandler {
int read_fd;
int write_fd;
TaskHandler()
: read_fd(-1), write_fd(-1) {
TaskHandler() : read_fd(-1), write_fd(-1) {
// do nothing
}
TaskHandler(TaskT const& task)
: read_fd(task.read_fd)
, write_fd(task.write_fd) {
explicit TaskHandler(TaskT const& task)
: read_fd(task.read_fd), write_fd(task.write_fd) {
// do nothing
}
inline bool valid() const {
return read_fd >= 0 && write_fd >= 0;
}
inline bool valid() const { return read_fd >= 0 && write_fd >= 0; }
static TaskHandler<TaskT>& valid_handle() {
static TaskHandler<TaskT> vhandle;
......@@ -198,13 +199,13 @@ struct TaskHandler {
}
};
template<typename TaskT>
template <typename TaskT>
class TaskExecutor;
template<typename InItemT, typename OutItemT>
template <typename InItemT, typename OutItemT>
class TaskManager;
template<typename TaskT>
template <typename TaskT>
struct ThreadContext {
TaskExecutor<TaskT>* executor;
void* user_thread_context;
......@@ -212,9 +213,7 @@ struct ThreadContext {
int init_status;
ThreadContext()
: executor(NULL)
, user_thread_context(NULL)
, tid(-1), init_status(0) {
: executor(NULL), user_thread_context(NULL), tid(-1), init_status(0) {
// do nothing
}
......@@ -226,10 +225,9 @@ struct ThreadContext {
}
};
template<typename TaskT>
template <typename TaskT>
class TaskExecutor {
public:
public:
typedef typename TaskT::InType InType;
typedef typename TaskT::OutType OutType;
typedef typename TaskT::InArrayT InArrayT;
......@@ -237,13 +235,13 @@ public:
typedef std::vector<TaskT> TaskArrayT;
TaskExecutor()
: _stop(false)
, _thread_init_fn(NULL)
, _thread_reset_fn(NULL)
, _user_thread_contexts(NULL)
, _batch_size(DEFAULT_BATCH_SIZE)
, _batch_align(false)
, _fn(NULL) {
: _stop(false),
_thread_init_fn(NULL),
_thread_reset_fn(NULL),
_user_thread_contexts(NULL),
_batch_size(DEFAULT_BATCH_SIZE),
_batch_align(false),
_fn(NULL) {
THREAD_MUTEX_INIT(&_mut, NULL);
THREAD_COND_INIT(&_cond, NULL);
_task_queue.clear();
......@@ -259,15 +257,12 @@ public:
return &singleton;
}
void set_batch_size(size_t batch_size) {
_batch_size = batch_size;
}
void set_batch_size(size_t batch_size) { _batch_size = batch_size; }
void set_batch_align(size_t batch_align) {
_batch_align = batch_align;
}
void set_batch_align(size_t batch_align) { _batch_align = batch_align; }
void set_thread_init_fn(boost::function<int(void*)> init_fn, void** contexts = NULL) {
void set_thread_init_fn(boost::function<int(void*)> init_fn,
void** contexts = NULL) {
_thread_init_fn = init_fn;
_user_thread_contexts = contexts;
}
......@@ -276,7 +271,8 @@ public:
_thread_reset_fn = reset_fn;
}
void set_thread_callback_fn(boost::function<void(const InArrayT&, OutArrayT&)> cb) {
void set_thread_callback_fn(
boost::function<void(const InArrayT&, OutArrayT&)> cb) {
_fn = cb;
}
......@@ -285,7 +281,7 @@ public:
static void* thread_entry(void* args);
private:
private:
TaskExecutor(TaskExecutor<TaskT> const& other);
TaskExecutor* operator=(TaskExecutor<TaskT> const& other);
......@@ -293,7 +289,7 @@ private:
TaskHandler<TaskT> schedule(const InArrayT&, OutArrayT&);
bool fetch_batch(BatchTasks<TaskT>& batch);
bool fetch_batch(BatchTasks<TaskT>& batch); // NOLINT
bool _stop;
......@@ -316,56 +312,44 @@ private:
boost::function<void(const InArrayT&, OutArrayT&)> _fn;
};
template<typename InItemT, typename OutItemT>
template <typename InItemT, typename OutItemT>
class TaskManager {
public:
public:
typedef Task<InItemT, OutItemT> TaskT;
typedef typename TaskT::InArrayT InArrayT;
typedef typename TaskT::OutArrayT OutArrayT;
explicit TaskManager(TaskExecutor<TaskT>& exe, size_t batch_size) : _executor(exe) {
}
explicit TaskManager(TaskExecutor<TaskT>& exe, size_t batch_size) // NOLINT
: _executor(exe) {}
TaskManager()
: _executor(*TaskExecutor<TaskT>::instance()) {
}
TaskManager() : _executor(*TaskExecutor<TaskT>::instance()) {}
~TaskManager() {
wait();
}
~TaskManager() { wait(); }
bool schedule(const InArrayT& in, OutArrayT& out);
bool schedule(const InArrayT& in, OutArrayT& out); // NOLINT
void wait();
inline void clear() {
wait();
}
inline void clear() { wait(); }
private:
private:
TaskExecutor<TaskT>& _executor;
TaskHandler<TaskT> _task_owned;
}; // class TaskManager
class AutoMutex {
public:
AutoMutex(THREAD_MUTEX_T& mut)
: _mut(mut) {
public:
explicit AutoMutex(THREAD_MUTEX_T& mut) : _mut(mut) {
THREAD_MUTEX_LOCK(&_mut);
}
~AutoMutex() {
THREAD_MUTEX_UNLOCK(&_mut);
}
~AutoMutex() { THREAD_MUTEX_UNLOCK(&_mut); }
private:
private:
THREAD_MUTEX_T& _mut;
};
} // namespace bsf
} // namespace im
#include "bsf-inl.h"
#include "bsf-inl-tensor.h"
#endif //BAIDU_PADDLE_SERVING_PREDICTOR_BSF_H
#include "predictor/framework/bsf-inl-tensor.h"
#include "predictor/framework/bsf-inl.h"
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_CHANNEL_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_CHANNEL_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <utility>
#include "common/inner_common.h"
namespace baidu {
......@@ -10,14 +24,12 @@ namespace predictor {
class Channel;
class Bus {
public:
Bus() {
clear();
}
public:
Bus() { clear(); }
int regist(const std::string& op, Channel* channel) {
std::pair<boost::unordered_map<std::string, Channel*>::iterator, bool> r
= _op_channels.insert(std::make_pair(op, channel));
std::pair<boost::unordered_map<std::string, Channel*>::iterator, bool> r =
_op_channels.insert(std::make_pair(op, channel));
if (!r.second) {
LOG(ERROR) << "Failed insert op&channel into bus:" << op;
return -1;
......@@ -26,32 +38,26 @@ public:
}
Channel* channel_by_name(const std::string& op_name) {
typename boost::unordered_map<std::string, Channel*>::iterator it
= _op_channels.find(op_name);
typename boost::unordered_map<std::string, Channel*>::iterator it =
_op_channels.find(op_name);
if (it == _op_channels.end()) {
LOG(WARNING)
<< "Not found channel in bus, op_name:"
<< op_name << ".";
LOG(WARNING) << "Not found channel in bus, op_name:" << op_name << ".";
return NULL;
}
return it->second;
}
void clear() {
_op_channels.clear();
}
void clear() { _op_channels.clear(); }
size_t size() const {
return _op_channels.size();
}
size_t size() const { return _op_channels.size(); }
private:
private:
boost::unordered_map<std::string, Channel*> _op_channels;
};
class Channel {
public:
public:
Channel() {}
void init(uint32_t id, const char* op) {
......@@ -60,23 +66,15 @@ public:
clear_data();
}
void deinit() {
clear_data();
}
void deinit() { clear_data(); }
uint32_t id() const {
return _id;
}
uint32_t id() const { return _id; }
const std::string& op() {
return _op;
}
const std::string& op() { return _op; }
int share_to_bus(Bus* bus) {
if (bus->regist(_op, this) != 0) {
LOG(ERROR)
<< "Failed regist channel[" << _op
<< "] to bus!";
LOG(ERROR) << "Failed regist channel[" << _op << "] to bus!";
return -1;
}
......@@ -95,31 +93,25 @@ public:
virtual std::string debug_string() const = 0;
private:
private:
uint32_t _id;
std::string _op;
};
template<typename T>
template <typename T>
class OpChannel : public Channel {
public:
OpChannel() {
}
public:
OpChannel() {}
void clear_data() {
_data.Clear();
}
void clear_data() { _data.Clear(); }
void* param() {
return &_data;
}
void* param() { return &_data; }
const void* param() const {
return &_data;
}
const void* param() const { return &_data; }
google::protobuf::Message* message() {
return message_impl(derived_from_message<
return message_impl(
derived_from_message<
TIsDerivedFromB<T, google::protobuf::Message>::RESULT>());
}
......@@ -134,15 +126,18 @@ public:
}
const google::protobuf::Message* message() const {
return message_impl(derived_from_message<
return message_impl(
derived_from_message<
TIsDerivedFromB<T, google::protobuf::Message>::RESULT>());
}
const google::protobuf::Message* message_impl(derived_from_message<true>) const {
const google::protobuf::Message* message_impl(
derived_from_message<true>) const {
return dynamic_cast<const google::protobuf::Message*>(&_data);
}
const google::protobuf::Message* message_impl(derived_from_message<false>) const {
const google::protobuf::Message* message_impl(
derived_from_message<false>) const {
LOG(ERROR) << "Current type: " << typeid(T).name()
<< " is not derived from protobuf.";
return NULL;
......@@ -153,58 +148,41 @@ public:
return *this;
}
std::string debug_string() const {
return _data.ShortDebugString();
}
std::string debug_string() const { return _data.ShortDebugString(); }
// functions of derived class
T* data() {
return &_data;
}
T* data() { return &_data; }
const T* data() const {
return &_data;
}
const T* data() const { return &_data; }
Channel& operator=(const T& obj) {
_data = obj;
return *this;
}
private:
private:
T _data;
};
template<>
template <>
class OpChannel<google::protobuf::Message> : public Channel {
public:
OpChannel<google::protobuf::Message>() : _data(NULL) {
}
public:
OpChannel<google::protobuf::Message>() : _data(NULL) {}
virtual ~OpChannel<google::protobuf::Message>() {
_data = NULL;
}
virtual ~OpChannel<google::protobuf::Message>() { _data = NULL; }
void clear_data() {
_data = NULL;
}
void clear_data() { _data = NULL; }
void* param() {
return const_cast<void*>((const void*)_data);
}
void* param() { return const_cast<void*>((const void*)_data); }
const void* param() const {
return _data;
}
const void* param() const { return _data; }
google::protobuf::Message* message() {
return const_cast<google::protobuf::Message*>(_data);
}
const google::protobuf::Message* message() const {
return _data;
}
const google::protobuf::Message* message() const { return _data; }
Channel& operator=(const Channel& channel) {
_data = channel.message();
......@@ -224,9 +202,7 @@ public:
return const_cast<google::protobuf::Message*>(_data);
}
const google::protobuf::Message* data() const {
return _data;
}
const google::protobuf::Message* data() const { return _data; }
OpChannel<google::protobuf::Message>& operator=(
google::protobuf::Message* message) {
......@@ -240,12 +216,10 @@ public:
return *this;
}
private:
private:
const google::protobuf::Message* _data;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "common/inner_common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/dag.h"
#include "op/op.h"
#include <string>
#include <vector>
#include "common/inner_common.h"
#include "framework/predictor_metric.h" // PredictorMetric
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
......@@ -13,12 +29,12 @@ Dag::Dag() {
_stages.clear();
}
// Tears down DAG state via deinit().
// NOTE(review): the diff export left both the old multi-line and the new
// one-line destructor in this span; resolved to the post-change version.
Dag::~Dag() { deinit(); }
int Dag::deinit() {
for (std::vector<DagStage*>::iterator iter = _stages.begin(); iter != _stages.end(); ++iter) {
for (std::vector<DagStage*>::iterator iter = _stages.begin();
iter != _stages.end();
++iter) {
if (*iter != NULL) {
delete *iter;
}
......@@ -118,8 +134,7 @@ int Dag::init(const configure::Workflow& conf, const std::string& name) {
conf.nodes(i).dependencies(j);
std::string name = depend.name();
std::string mode = depend.mode();
node->depends.insert(
std::make_pair(name, parse_mode(mode)));
node->depends.insert(std::make_pair(name, parse_mode(mode)));
}
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
......@@ -143,8 +158,7 @@ int Dag::init(const configure::Workflow& conf, const std::string& name) {
LOG(INFO) << ", Op Num: " << _index_nodes.size();
for (uint32_t nid = 0; nid < _index_nodes.size(); nid++) {
DagNode* node = _index_nodes[nid];
LOG(INFO)
<< ", OP-" << node->id << "-" << node->name << "-"
LOG(INFO) << ", OP-" << node->id << "-" << node->name << "-"
<< node->type;
LOG(INFO) << " depends: " << node->depends.size();
......@@ -159,17 +173,11 @@ int Dag::init(const configure::Workflow& conf, const std::string& name) {
return ERR_OK;
}
// Number of DAG nodes, indexed by node id.
// NOTE(review): diff export left old+new definitions; resolved to the
// post-change one-line form.
uint32_t Dag::nodes_size() { return _index_nodes.size(); }
// Looks up a node by id; no bounds check, `id` must be < nodes_size().
// NOTE(review): diff export left old+new definitions; resolved to the
// post-change one-line form.
const DagNode* Dag::node_by_id(uint32_t id) { return _index_nodes[id]; }
// Const overload of node_by_id; same precondition (id < nodes_size()).
// NOTE(review): diff export left old+new definitions; resolved to the
// post-change one-line form.
const DagNode* Dag::node_by_id(uint32_t id) const { return _index_nodes[id]; }
const DagNode* Dag::node_by_name(std::string& name) {
return _name_nodes[name];
......@@ -185,16 +193,11 @@ const DagNode* Dag::node_by_name(const std::string& name) const {
return it->second;
}
// Number of topological stages in this DAG.
// NOTE(review): diff export left old+new definitions; resolved to the
// post-change one-line form.
uint32_t Dag::stage_size() { return _stages.size(); }
// Looks up a stage by index; no bounds check, `index` must be < stage_size().
// NOTE(review): diff export left old+new definitions; resolved to the
// post-change one-line form.
const DagStage* Dag::stage_by_index(uint32_t index) { return _stages[index]; }
int Dag::topo_sort() {
// TODO ƽ
std::stringstream ss;
for (uint32_t nid = 0; nid < _index_nodes.size(); nid++) {
DagStage* stage = new (std::nothrow) DagStage();
......@@ -212,7 +215,8 @@ int Dag::topo_sort() {
// assign stage number after stage created
_index_nodes[nid]->stage = nid;
// assign dag node full name after stage created
_index_nodes[nid]->full_name = stage->full_name + NAME_DELIMITER + _index_nodes[nid]->name;
_index_nodes[nid]->full_name =
stage->full_name + NAME_DELIMITER + _index_nodes[nid]->name;
}
return ERR_OK;
}
......@@ -239,6 +243,6 @@ void Dag::regist_metric(const std::string& service_name) {
}
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
// Access mode of a DAG edge: read-only, read-write, or unrecognized
// (UNKNOWN is the parse-failure sentinel).
// NOTE(review): the diff export left the enum defined twice (old
// multi-line + new one-line); resolved to the post-change version.
enum EdgeMode { RO = 0, RW = 1, UNKNOWN };
struct DagNode {
uint32_t id;
......@@ -30,12 +40,12 @@ struct DagStage {
};
class Dag {
public:
public:
Dag();
virtual ~Dag();
EdgeMode parse_mode(std::string& mode);
EdgeMode parse_mode(std::string& mode); // NOLINT
int init(const char* path, const char* file, const std::string& name);
......@@ -49,7 +59,7 @@ public:
const DagNode* node_by_id(uint32_t id) const;
const DagNode* node_by_name(std::string& name);
const DagNode* node_by_name(std::string& name); // NOLINT
const DagNode* node_by_name(const std::string& name) const;
......@@ -57,28 +67,22 @@ public:
const DagStage* stage_by_index(uint32_t index);
const std::string& name() const {
return _dag_name;
}
const std::string& name() const { return _dag_name; }
const std::string& full_name() const {
return _dag_name;
}
const std::string& full_name() const { return _dag_name; }
void regist_metric(const std::string& service_name);
private:
private:
int topo_sort();
private:
private:
std::string _dag_name;
boost::unordered_map<std::string, DagNode*> _name_nodes;
std::vector<DagNode*> _index_nodes;
std::vector<DagStage*> _stages;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_DAG_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/dag_view.h"
#include <brpc/traceprintf.h> // TRACEPRINTF
#include <string>
#include "common/inner_common.h"
#include "framework/op_repository.h"
......@@ -22,8 +37,7 @@ int DagView::init(Dag* dag, const std::string& service_name) {
}
ViewStage* vstage = butil::get_object<ViewStage>();
if (vstage == NULL) {
LOG(ERROR)
<< "Failed get vstage from object pool"
LOG(ERROR) << "Failed get vstage from object pool"
<< "at:" << si;
return ERR_MEM_ALLOC_FAILURE;
}
......@@ -40,13 +54,13 @@ int DagView::init(Dag* dag, const std::string& service_name) {
// factory type
Op* op = OpRepository::instance().get_op(node->type);
if (op == NULL) {
LOG(ERROR) << "Failed get op with type:"
<< node->type;
LOG(ERROR) << "Failed get op with type:" << node->type;
return ERR_INTERNAL_FAILURE;
}
// initialize a TLS op object
if (op->init(_bus, dag, node->id, node->name, node->type, node->conf) != 0) {
if (op->init(_bus, dag, node->id, node->name, node->type, node->conf) !=
0) {
LOG(WARNING) << "Failed init op, type:" << node->type;
return ERR_INTERNAL_FAILURE;
}
......@@ -91,9 +105,7 @@ int DagView::execute(butil::IOBufBuilder* debug_os) {
int errcode = execute_one_stage(_view[si], debug_os);
TRACEPRINTF("finish to execute stage[%u]", si);
if (errcode < 0) {
LOG(ERROR)
<< "failed execute stage["
<< _view[si]->debug();
LOG(ERROR) << "failed execute stage[" << _view[si]->debug();
return errcode;
}
}
......@@ -115,26 +127,22 @@ int DagView::execute_one_stage(ViewStage* vstage,
int errcode = op->process(debug_os != NULL);
TRACEPRINTF("finish to execute op[%s]", op->name());
if (errcode < 0) {
LOG(ERROR)
<< "Execute failed, Op:" << op->debug_string();
LOG(ERROR) << "Execute failed, Op:" << op->debug_string();
return errcode;
}
if (errcode > 0) {
LOG(INFO)
<< "Execute ignore, Op:" << op->debug_string();
LOG(INFO) << "Execute ignore, Op:" << op->debug_string();
continue;
}
if (debug_os) {
(*debug_os)
<< "{\"op_name\": \"" << op->name()
<< "\", \"debug_str:\": \""
<< op->debug_string()
(*debug_os) << "{\"op_name\": \"" << op->name()
<< "\", \"debug_str:\": \"" << op->debug_string()
<< "\", \"time_info\": \"" << op->time_info() << "\"}";
}
//LOG(DEBUG) << "Execute succ, Op:" << op->debug_string();
// LOG(DEBUG) << "Execute succ, Op:" << op->debug_string();
}
stage_time.stop();
PredictorMetric::GetInstance()->update_latency_metric(
......@@ -161,10 +169,8 @@ const Channel* DagView::get_response_channel() const {
}
ViewStage* last_stage = _view[_view.size() - 1];
if (last_stage->nodes.size() != 1
|| last_stage->nodes[0] == NULL) {
LOG(ERROR) << "Invalid last stage, size["
<< last_stage->nodes.size()
if (last_stage->nodes.size() != 1 || last_stage->nodes[0] == NULL) {
LOG(ERROR) << "Invalid last stage, size[" << last_stage->nodes.size()
<< "] != 1";
return NULL;
}
......@@ -177,6 +183,6 @@ const Channel* DagView::get_response_channel() const {
return last_op->mutable_channel();
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
#include "op/op.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "common/inner_common.h"
#include "framework/channel.h"
#include "framework/dag.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
......@@ -24,16 +38,12 @@ struct ViewNode {
struct ViewStage {
std::vector<ViewNode*> nodes;
std::string full_name; // service_workflow_stageindex
std::string debug() {
return "TOBE IMPLEMENTED!";
}
std::string debug() { return "TOBE IMPLEMENTED!"; }
};
class DagView {
public:
DagView() : _bus(NULL) {
_view.clear();
}
public:
DagView() : _bus(NULL) { _view.clear(); }
~DagView() {}
......@@ -49,19 +59,15 @@ public:
virtual int execute_one_stage(ViewStage* vstage,
butil::IOBufBuilder* debug_os);
int set_request_channel(Channel& request);
int set_request_channel(Channel& request); // NOLINT
const Channel* get_response_channel() const;
const std::string& name() const {
return _name;
}
const std::string& name() const { return _name; }
const std::string& full_name() const {
return _full_name;
}
const std::string& full_name() const { return _full_name; }
private:
private:
std::string _name;
std::string _full_name;
std::vector<ViewStage*> _view;
......@@ -71,14 +77,10 @@ private:
// The derived DagView supports parallel execution
// strategy, by implments the execute_one_stage().
class ParallelDagView : public DagView {
public:
int execute_one_stage(ViewStage* vstage, butil::IOBufBuilder*) {
return 0;
}
public:
int execute_one_stage(ViewStage* vstage, butil::IOBufBuilder*) { return 0; }
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_DAG_VIEW_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/factory.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/10 22:09:57
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include <utility>
#include "common/inner_common.h"
#include "glog/raw_logging.h"
namespace baidu {
......@@ -24,11 +25,9 @@ namespace predictor {
//////////////// DECLARE INTERFACE ////////////////
#define DECLARE_FACTORY_OBJECT(D, B) \
static int regist(const std::string& tag) { \
FactoryDerive<D, B>* factory = \
new (std::nothrow) FactoryDerive<D, B>();\
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
tag, factory) != 0) { \
FactoryDerive<D, B>* factory = new (std::nothrow) FactoryDerive<D, B>(); \
if (factory == NULL || \
FactoryPool<B>::instance().register_factory(tag, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s in macro!", #D); \
return -1; \
} \
......@@ -36,119 +35,119 @@ namespace predictor {
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define PDS_STR_CAT_I(a, b) a##b
#define DEFINE_FACTORY_OBJECT(D) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
D::regist(#D); \
}
}
//////////////// REGISTER INTERFACE ////////////////
#define REGIST_FACTORY_OBJECT_IMPL(D, B) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::predictor::FactoryDerive<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::predictor::FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory = new ( \
::std::nothrow)::baidu::paddle_serving::predictor::FactoryDerive<D, \
B>(); \
if (factory == NULL || \
::baidu::paddle_serving::predictor::FactoryPool<B>::instance() \
.register_factory(#D, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->%s in macro!", #D, #B); \
return ; \
return; \
} \
return ; \
}
return; \
}
#define REGIST_FACTORY_OBJECT_IMPL_WITH_NAME(D, B, N) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::predictor::FactoryDerive<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::predictor::FactoryPool<B>::instance().register_factory(\
N, factory) != 0) { \
RAW_LOG_FATAL("Failed regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
return ; \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
::baidu::paddle_serving::predictor::FactoryDerive<D, B>* factory = new ( \
::std::nothrow)::baidu::paddle_serving::predictor::FactoryDerive<D, \
B>(); \
if (factory == NULL || \
::baidu::paddle_serving::predictor::FactoryPool<B>::instance() \
.register_factory(N, factory) != 0) { \
RAW_LOG_FATAL( \
"Failed regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
return; \
} \
RAW_LOG_WARNING("Succ regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
return ; \
}
RAW_LOG_WARNING( \
"Succ regist factory: %s->%s, tag: %s in macro!", #D, #B, N); \
return; \
}
template<typename B>
template <typename B>
class FactoryBase {
public:
public:
virtual B* gen() = 0;
virtual void del(B* obj) = 0;
};
template<typename D, typename B>
template <typename D, typename B>
class FactoryDerive : public FactoryBase<B> {
public:
B* gen() {
return new(std::nothrow) D();
}
public:
B* gen() { return new (std::nothrow) D(); }
void del(B* obj) {
delete dynamic_cast<D*>(obj);
}
void del(B* obj) { delete dynamic_cast<D*>(obj); }
};
template<typename B>
template <typename B>
class FactoryPool {
public:
public:
static FactoryPool<B>& instance() {
static FactoryPool<B> singleton;
return singleton;
}
int register_factory(const std::string& tag,
FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
int register_factory(const std::string& tag, FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it =
_pool.find(tag);
if (it != _pool.end()) {
RAW_LOG_FATAL("Insert duplicate with tag: %s", tag.c_str());
return -1;
}
std::pair<
typename std::map<std::string, FactoryBase<B>*>::iterator,
bool> r = _pool.insert(std::make_pair(tag, factory));
std::pair<typename std::map<std::string, FactoryBase<B>*>::iterator, bool>
r = _pool.insert(std::make_pair(tag, factory));
if (!r.second) {
RAW_LOG_FATAL("Failed insert new factory with: %s", tag.c_str());
return -1;
}
RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s", tag.c_str(), typeid(B).name());
RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s",
tag.c_str(),
typeid(B).name());
return 0;
}
B* generate_object(const std::string& tag) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
typename std::map<std::string, FactoryBase<B>*>::iterator it =
_pool.find(tag);
if (it == _pool.end() || it->second == NULL) {
RAW_LOG_FATAL("Not found factory pool, tag: %s, pool size %u", tag.c_str(), _pool.size());
RAW_LOG_FATAL("Not found factory pool, tag: %s, pool size %u",
tag.c_str(),
_pool.size());
return NULL;
}
return it->second->gen();
}
template<typename D>
template <typename D>
void return_object(B* object) {
FactoryDerive<D, B> factory;
factory.del(object);
}
private:
private:
std::map<std::string, FactoryBase<B>*> _pool;
};
} // predictor
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_PREDICTOR_FACTORY_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/* vim: set expandtab ts=2 sw=2 sts=2 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <vector>
#include "common/inner_common.h"
#include "framework/infer_data.h"
#include "framework/factory.h"
#include "framework/bsf.h"
#include "framework/factory.h"
#include "framework/infer_data.h"
namespace baidu {
namespace paddle_serving {
......@@ -16,25 +30,16 @@ namespace predictor {
using configure::ModelToolkitConf;
class InferEngine {
public:
public:
virtual ~InferEngine() {}
virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
return proc_initialize_impl(conf, version);
}
virtual int proc_finalize() {
return proc_finalize_impl();
}
virtual int thrd_initialize() {
return thrd_initialize_impl();
}
virtual int thrd_clear() {
return thrd_clear_impl();
}
virtual int thrd_finalize() {
return thrd_finalize_impl();
}
virtual int proc_finalize() { return proc_finalize_impl(); }
virtual int thrd_initialize() { return thrd_initialize_impl(); }
virtual int thrd_clear() { return thrd_clear_impl(); }
virtual int thrd_finalize() { return thrd_finalize_impl(); }
virtual int infer(const void* in, void* out, uint32_t batch_size = -1) {
return infer_impl1(in, out, batch_size);
}
......@@ -44,20 +49,22 @@ public:
virtual uint64_t version() const = 0;
// begin: framework inner call
virtual int proc_initialize_impl(
const configure::EngineDesc& conf, bool version) = 0;
virtual int proc_initialize_impl(const configure::EngineDesc& conf,
bool version) = 0;
virtual int thrd_initialize_impl() = 0;
virtual int thrd_finalize_impl() = 0;
virtual int thrd_clear_impl() = 0;
virtual int proc_finalize_impl() = 0;
virtual int infer_impl1(
const void* in, void* out, uint32_t batch_size = -1) = 0;
virtual int infer_impl2(const BatchTensor& in, BatchTensor& out) = 0;
virtual int infer_impl1(const void* in,
void* out,
uint32_t batch_size = -1) = 0;
virtual int infer_impl2(const BatchTensor& in,
BatchTensor& out) = 0; // NOLINT
// end: framework inner call
};
class ReloadableInferEngine : public InferEngine {
public:
public:
virtual ~ReloadableInferEngine() {}
union last_check_status {
......@@ -109,9 +116,10 @@ public:
im::bsf::TaskExecutor<TaskT>::instance()->set_thread_callback_fn(
boost::bind(&InferEngine::infer_impl2, this, _1, _2));
im::bsf::TaskExecutor<TaskT>::instance()->set_batch_size(_infer_batch_size);
im::bsf::TaskExecutor<TaskT>::instance()->set_batch_align(_infer_batch_align);
if (im::bsf::TaskExecutor<TaskT>::instance()->start(_infer_thread_num)
!= 0) {
im::bsf::TaskExecutor<TaskT>::instance()->set_batch_align(
_infer_batch_align);
if (im::bsf::TaskExecutor<TaskT>::instance()->start(_infer_thread_num) !=
0) {
LOG(ERROR) << "Failed start bsf executor, threads:" << _infer_thread_num;
return -1;
}
......@@ -129,7 +137,8 @@ public:
}
im::bsf::TaskManager<Tensor, Tensor> task_manager;
task_manager.schedule(*(const BatchTensor*)in, *(BatchTensor*)out);
task_manager.schedule(*(reinterpret_cast<const BatchTensor*>(in)),
*(reinterpret_cast<BatchTensor*>(out)));
task_manager.wait();
return 0;
}
......@@ -171,21 +180,16 @@ public:
return 0;
}
uint64_t version() const {
return _version;
}
uint64_t version() const { return _version; }
uint32_t thread_num() const {
return _infer_thread_num;
}
uint32_t thread_num() const { return _infer_thread_num; }
private:
private:
int parse_version_info(const configure::EngineDesc& config, bool version) {
_version = uint64_t(-1);
return 0;
}
bool check_need_reload() {
if (_reload_mode_tag == "timestamp_ne") {
return check_timestamp_ne();
......@@ -198,8 +202,7 @@ private:
} else if (_reload_mode_tag == "none") {
return false;
} else {
LOG(ERROR) << "Not support check type: "
<< _reload_mode_tag;
LOG(ERROR) << "Not support check type: " << _reload_mode_tag;
return false;
}
}
......@@ -207,13 +210,11 @@ private:
bool check_timestamp_ne() {
struct stat st;
if (stat(_reload_tag_file.c_str(), &st) != 0) {
LOG(ERROR) << "Failed stat config file:"
<< _reload_tag_file;
LOG(ERROR) << "Failed stat config file:" << _reload_tag_file;
return false;
}
if ((st.st_mode & S_IFREG) &&
st.st_mtime != _last_status.last_timestamp) {
if ((st.st_mode & S_IFREG) && st.st_mtime != _last_status.last_timestamp) {
_last_status.last_timestamp = st.st_mtime;
return true;
}
......@@ -224,13 +225,11 @@ private:
bool check_timestamp_gt() {
struct stat st;
if (stat(_reload_tag_file.c_str(), &st) != 0) {
LOG(ERROR) << "Failed stat config file:"
<< _reload_tag_file;
LOG(ERROR) << "Failed stat config file:" << _reload_tag_file;
return false;
}
if ((st.st_mode & S_IFREG) &&
st.st_mtime > _last_status.last_timestamp) {
if ((st.st_mode & S_IFREG) && st.st_mtime > _last_status.last_timestamp) {
_last_status.last_timestamp = st.st_mtime;
return true;
}
......@@ -238,18 +237,14 @@ private:
return false;
}
bool check_md5sum() {
return false;
}
bool check_md5sum() { return false; }
bool check_revision() {
return false;
}
bool check_revision() { return false; }
protected:
protected:
std::string _model_data_path;
private:
private:
std::string _reload_tag_file;
std::string _reload_mode_tag;
last_check_status _last_status;
......@@ -259,7 +254,7 @@ private:
uint64_t _version;
};
template<typename EngineCore>
template <typename EngineCore>
struct ModelData {
ModelData() : current_idx(1) {
cores[0] = NULL;
......@@ -275,9 +270,9 @@ struct ModelData {
uint32_t current_idx;
};
template<typename EngineCore>
template <typename EngineCore>
class DBReloadableInferEngine : public ReloadableInferEngine {
public:
public:
virtual ~DBReloadableInferEngine() {}
int proc_initialize(const configure::EngineDesc& conf, bool version) {
......@@ -310,8 +305,7 @@ public:
}
md->cores[next_idx] = new (std::nothrow) EngineCore;
if (!md->cores[next_idx]
|| md->cores[next_idx]->create(data_path) != 0) {
if (!md->cores[next_idx] || md->cores[next_idx]->create(data_path) != 0) {
LOG(ERROR) << "Failed create model, path: " << data_path;
return -1;
}
......@@ -326,7 +320,7 @@ public:
return -1;
}
ModelData<EngineCore>* md = new(std::nothrow) ModelData<EngineCore>;
ModelData<EngineCore>* md = new (std::nothrow) ModelData<EngineCore>;
if (!md || load_data(md, _model_data_path) != 0) {
LOG(ERROR) << "Failed create thread data from " << _model_data_path;
return -1;
......@@ -347,9 +341,7 @@ public:
return 0;
}
int thrd_finalize_impl() {
return 0;
}
int thrd_finalize_impl() { return 0; }
int proc_finalize_impl() {
THREAD_KEY_DELETE(_skey);
......@@ -358,7 +350,8 @@ public:
}
EngineCore* get_core() {
ModelData<EngineCore>* md = (ModelData<EngineCore>*)THREAD_GETSPECIFIC(_skey);
ModelData<EngineCore>* md =
(ModelData<EngineCore>*)THREAD_GETSPECIFIC(_skey);
if (!md) {
LOG(ERROR) << "Failed get thread specific data";
return NULL;
......@@ -366,17 +359,19 @@ public:
return md->cores[md->current_idx];
}
protected:
protected:
THREAD_KEY_T _skey;
THREAD_MUTEX_T _mutex;
std::vector<ModelData<EngineCore>*> _reload_vec;
private:
private:
};
// 多个EngineCore共用同一份模型数据
template<typename EngineCore>
class CloneDBReloadableInferEngine : public DBReloadableInferEngine<EngineCore> {
public:
template <typename EngineCore>
class CloneDBReloadableInferEngine
: public DBReloadableInferEngine<EngineCore> {
public:
virtual ~CloneDBReloadableInferEngine() {}
virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
......@@ -385,28 +380,28 @@ public:
LOG(ERROR) << "Failed to allocate for ProcData";
return -1;
}
return DBReloadableInferEngine<EngineCore>::proc_initialize(
conf, version);
return DBReloadableInferEngine<EngineCore>::proc_initialize(conf, version);
}
virtual int load(const std::string& model_data_dir) {
// 加载进程级模型数据
if (!_pd || DBReloadableInferEngine<EngineCore>::load_data(
_pd, model_data_dir) != 0) {
LOG(ERROR)
<< "Failed to create common model from ["
<< model_data_dir << "].";
if (!_pd ||
DBReloadableInferEngine<EngineCore>::load_data(_pd, model_data_dir) !=
0) {
LOG(ERROR) << "Failed to create common model from [" << model_data_dir
<< "].";
return -1;
}
LOG(WARNING)
<< "Succ load common model[" << _pd->cores[_pd->current_idx]
LOG(WARNING) << "Succ load common model[" << _pd->cores[_pd->current_idx]
<< "], path[" << model_data_dir << "].";
if (DBReloadableInferEngine<EngineCore>::_reload_vec.empty()) {
return 0;
}
for (uint32_t ti = 0; ti < DBReloadableInferEngine<EngineCore>::_reload_vec.size(); ++ti) {
for (uint32_t ti = 0;
ti < DBReloadableInferEngine<EngineCore>::_reload_vec.size();
++ti) {
if (load_data(DBReloadableInferEngine<EngineCore>::_reload_vec[ti],
_pd->cores[_pd->current_idx]) != 0) {
LOG(ERROR) << "Failed reload engine model: " << ti;
......@@ -420,26 +415,23 @@ public:
}
// 加载线程级对象,多个线程级对象共用pd_core的模型数据
int load_data(
ModelData<EngineCore>* td,
EngineCore* pd_core) {
int load_data(ModelData<EngineCore>* td, EngineCore* pd_core) {
uint32_t next_idx = (td->current_idx + 1) % 2;
if (td->cores[next_idx]) {
delete td->cores[next_idx];
}
td->cores[next_idx] = new (std::nothrow) EngineCore;
if (!td->cores[next_idx]
|| td->cores[next_idx]->clone(pd_core->get()) != 0) {
LOG(ERROR) << "Failed clone model from pd_core[ " << pd_core
<< "], idx[" << next_idx << "]";
if (!td->cores[next_idx] ||
td->cores[next_idx]->clone(pd_core->get()) != 0) {
LOG(ERROR) << "Failed clone model from pd_core[ " << pd_core << "], idx["
<< next_idx << "]";
return -1;
}
td->current_idx = next_idx;
LOG(WARNING)
<< "td_core[" << td->cores[td->current_idx]
<< "] clone model from pd_core["
<< pd_core << "] succ, cur_idx[" << td->current_idx << "].";
LOG(WARNING) << "td_core[" << td->cores[td->current_idx]
<< "] clone model from pd_core[" << pd_core
<< "] succ, cur_idx[" << td->current_idx << "].";
return 0;
}
......@@ -450,7 +442,7 @@ public:
return -1;
}
ModelData<EngineCore>* md = new(std::nothrow) ModelData<EngineCore>;
ModelData<EngineCore>* md = new (std::nothrow) ModelData<EngineCore>;
if (!md || load_data(md, _pd->cores[_pd->current_idx]) != 0) {
LOG(ERROR) << "Failed clone thread data, origin_core["
<< _pd->cores[_pd->current_idx] << "].";
......@@ -463,19 +455,20 @@ public:
return 0;
}
protected:
ModelData<EngineCore>* _pd; // 进程级EngineCore,多个线程级EngineCore共用该对象的模型数据
protected:
ModelData<EngineCore>*
_pd; // 进程级EngineCore,多个线程级EngineCore共用该对象的模型数据
};
template<typename FluidFamilyCore>
template <typename FluidFamilyCore>
class FluidInferEngine : public DBReloadableInferEngine<FluidFamilyCore> {
public:
public:
FluidInferEngine() {}
~FluidInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
FluidFamilyCore* core
= DBReloadableInferEngine<FluidFamilyCore>::get_core();
FluidFamilyCore* core =
DBReloadableInferEngine<FluidFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get fluid core in infer_impl()";
return -1;
......@@ -488,20 +481,20 @@ public:
return 0;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
int infer_impl2(const BatchTensor& in, BatchTensor& out) { // NOLINT
return infer_impl1(&in, &out);
}
};
template<typename TensorrtFamilyCore>
template <typename TensorrtFamilyCore>
class TensorrtInferEngine : public DBReloadableInferEngine<TensorrtFamilyCore> {
public:
public:
TensorrtInferEngine() {}
~TensorrtInferEngine() {}
int infer_impl1(const void* in, void* out, uint32_t batch_size) {
TensorrtFamilyCore* core
= DBReloadableInferEngine<TensorrtFamilyCore>::get_core();
TensorrtFamilyCore* core =
DBReloadableInferEngine<TensorrtFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get fluid core in infer_impl()";
return -1;
......@@ -514,15 +507,16 @@ public:
return 0;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
int infer_impl2(const BatchTensor& in, BatchTensor& out) { // NOLINT
LOG(ERROR) << "Tensortrt donot supports infer_impl2 yet!";
return -1;
}
};
template<typename AbacusFamilyCore>
class AbacusInferEngine : public CloneDBReloadableInferEngine<AbacusFamilyCore> {
public:
template <typename AbacusFamilyCore>
class AbacusInferEngine
: public CloneDBReloadableInferEngine<AbacusFamilyCore> {
public:
AbacusInferEngine() {}
~AbacusInferEngine() {}
......@@ -531,15 +525,15 @@ public:
return -1;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
int infer_impl2(const BatchTensor& in, BatchTensor& out) { // NOLINT
LOG(ERROR) << "Abacus dnn engine must use predict interface";
return -1;
}
// Abacus special interface
int predict(uint32_t ins_num) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in predict()";
return -1;
......@@ -548,8 +542,8 @@ public:
return core->predict(ins_num);
}
int set_use_fpga(bool use_fpga) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in predict()";
return -1;
......@@ -558,8 +552,8 @@ public:
return core->set_use_fpga(use_fpga);
}
int debug() {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in debug()";
return -1;
......@@ -568,8 +562,8 @@ public:
}
int set_search_id(uint64_t sid) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in set_serach_id()";
return -1;
......@@ -578,8 +572,8 @@ public:
}
int set_hidden_layer_dim(uint32_t dim) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in set_layer_dim()";
return -1;
......@@ -587,10 +581,9 @@ public:
return core->set_hidden_layer_dim(dim);
}
int get_input(
uint32_t ins_idx, uint32_t* fea_num, void* in) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
int get_input(uint32_t ins_idx, uint32_t* fea_num, void* in) {
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in get_input()";
return -1;
......@@ -599,9 +592,11 @@ public:
}
int get_layer_value(const std::string& name,
uint32_t ins_num, uint32_t fea_dim, void* out) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
uint32_t ins_num,
uint32_t fea_dim,
void* out) {
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in get_layer_value()";
return -1;
......@@ -610,8 +605,8 @@ public:
}
void set_position_idx(void* input, uint64_t fea, uint32_t ins_idx) {
AbacusFamilyCore* core
= CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
AbacusFamilyCore* core =
CloneDBReloadableInferEngine<AbacusFamilyCore>::get_core();
if (!core || !core->get()) {
LOG(ERROR) << "Failed get abacus core in set_position_idx()";
return;
......@@ -621,9 +616,10 @@ public:
}
};
template<typename PaddleV2FamilyCore>
class PaddleV2InferEngine : public CloneDBReloadableInferEngine<PaddleV2FamilyCore> {
public:
template <typename PaddleV2FamilyCore>
class PaddleV2InferEngine
: public CloneDBReloadableInferEngine<PaddleV2FamilyCore> {
public:
PaddleV2InferEngine() {}
~PaddleV2InferEngine() {}
......@@ -632,7 +628,7 @@ public:
return -1;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) {
int infer_impl2(const BatchTensor& in, BatchTensor& out) { // NOLINT
LOG(ERROR) << "Paddle V2 engine must use predict interface";
return -1;
}
......@@ -641,38 +637,31 @@ public:
typedef FactoryPool<InferEngine> StaticInferFactory;
class VersionedInferEngine : public InferEngine {
public:
VersionedInferEngine() {
_versions.clear();
}
public:
VersionedInferEngine() { _versions.clear(); }
~VersionedInferEngine() {}
int proc_initialize(const configure::EngineDesc& conf) {
if (proc_initialize(conf, false) != 0) {
LOG(ERROR) << "Failed proc intialize engine: "
<< conf.name().c_str();
LOG(ERROR) << "Failed proc intialize engine: " << conf.name().c_str();
return -1;
}
LOG(WARNING)
<< "Succ proc initialize engine: " << conf.name().c_str();
LOG(WARNING) << "Succ proc initialize engine: " << conf.name().c_str();
return 0;
}
int proc_initialize(const configure::EngineDesc& conf, bool version) {
std::string engine_type = conf.type();
InferEngine* engine
= StaticInferFactory::instance().generate_object(
engine_type);
InferEngine* engine =
StaticInferFactory::instance().generate_object(engine_type);
if (!engine) {
LOG(ERROR) << "Failed generate engine with type:"
<< engine_type;
LOG(ERROR) << "Failed generate engine with type:" << engine_type;
return -1;
}
if (engine->proc_initialize(conf, version) != 0) {
LOG(ERROR) << "Failed initialize engine, type:"
<< engine_type;
LOG(ERROR) << "Failed initialize engine, type:" << engine_type;
return -1;
}
......@@ -682,19 +671,17 @@ public:
<< ", type: " << engine_type;
return -1;
}
LOG(WARNING)
<< "Succ proc initialize version engine: " << engine->version();
LOG(WARNING) << "Succ proc initialize version engine: "
<< engine->version();
return 0;
}
int proc_finalize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->proc_finalize() != 0) {
LOG(ERROR) << "Failed proc finalize version engine: " <<
iter->first;
LOG(ERROR) << "Failed proc finalize version engine: " << iter->first;
}
LOG(WARNING)
<< "Succ proc finalize version engine: " << iter->first;
LOG(WARNING) << "Succ proc finalize version engine: " << iter->first;
}
return 0;
}
......@@ -702,12 +689,10 @@ public:
int thrd_initialize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_initialize() != 0) {
LOG(ERROR) << "Failed thrd initialize version engine: " <<
iter->first;
LOG(ERROR) << "Failed thrd initialize version engine: " << iter->first;
return -1;
}
LOG(WARNING)
<< "Succ thrd initialize version engine: " << iter->first;
LOG(WARNING) << "Succ thrd initialize version engine: " << iter->first;
}
return 0;
}
......@@ -715,8 +700,7 @@ public:
int thrd_clear() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_clear() != 0) {
LOG(ERROR) << "Failed thrd clear version engine: " <<
iter->first;
LOG(ERROR) << "Failed thrd clear version engine: " << iter->first;
return -1;
}
LOG(INFO) << "Succ thrd clear version engine: " << iter->first;
......@@ -727,8 +711,7 @@ public:
int thrd_finalize() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->thrd_finalize() != 0) {
LOG(ERROR) << "Failed thrd finalize version engine: " <<
iter->first;
LOG(ERROR) << "Failed thrd finalize version engine: " << iter->first;
return -1;
}
LOG(WARNING) << "Succ thrd finalize version engine: " << iter->first;
......@@ -739,8 +722,7 @@ public:
int reload() {
for (auto iter = _versions.begin(); iter != _versions.end(); ++iter) {
if (iter->second->reload() != 0) {
LOG(ERROR) << "Failed reload version engine: " <<
iter->first;
LOG(ERROR) << "Failed reload version engine: " << iter->first;
return -1;
}
LOG(WARNING) << "Succ reload version engine: " << iter->first;
......@@ -760,8 +742,7 @@ public:
// inference interface
InferEngine* default_engine() const {
if (_versions.size() != 1) {
LOG(ERROR) << "Ambiguous default engine version:"
<< _versions.size();
LOG(ERROR) << "Ambiguous default engine version:" << _versions.size();
return NULL;
}
......@@ -777,7 +758,7 @@ public:
return engine->infer(in, out, batch_size);
}
template<typename T>
template <typename T>
T* get_core() {
InferEngine* engine = default_engine();
if (!engine) {
......@@ -793,8 +774,7 @@ public:
}
// versioned inference interface
int infer(
const void* in, void* out, uint32_t batch_size, uint64_t version) {
int infer(const void* in, void* out, uint32_t batch_size, uint64_t version) {
auto iter = _versions.find(version);
if (iter == _versions.end()) {
LOG(ERROR) << "Not found version engine: " << version;
......@@ -804,7 +784,7 @@ public:
return iter->second->infer(in, out, batch_size);
}
template<typename T>
template <typename T>
T* get_core(uint64_t version) {
auto iter = _versions.find(version);
if (iter == _versions.end()) {
......@@ -821,20 +801,26 @@ public:
}
// --
int proc_initialize_impl(const configure::EngineDesc& conf, bool) { return -1; }
int proc_initialize_impl(const configure::EngineDesc& conf, bool) {
return -1;
}
int thrd_initialize_impl() { return -1; }
int thrd_finalize_impl() { return -1; }
int thrd_clear_impl() { return -1; }
int proc_finalize_impl() { return -1; }
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) { return -1; }
int infer_impl2(const BatchTensor& in, BatchTensor& out) { return -1; }
int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
return -1;
}
int infer_impl2(const BatchTensor& in, BatchTensor& out) { // NOLINT
return -1;
} // NOLINT
private:
private:
boost::unordered_map<uint64_t, InferEngine*> _versions;
};
class InferManager {
public:
public:
static InferManager& instance() {
static InferManager ins;
return ins;
......@@ -843,8 +829,7 @@ public:
int proc_initialize(const char* path, const char* file) {
ModelToolkitConf model_toolkit_conf;
if (configure::read_proto_conf(path, file, &model_toolkit_conf) != 0) {
LOG(ERROR) << "failed load infer config, path: "
<< path << "/" << file;
LOG(ERROR) << "failed load infer config, path: " << path << "/" << file;
return -1;
}
......@@ -858,8 +843,7 @@ public:
}
if (engine->proc_initialize(model_toolkit_conf.engines(ei)) != 0) {
LOG(ERROR) << "Failed initialize version engine, name:"
<< engine_name;
LOG(ERROR) << "Failed initialize version engine, name:" << engine_name;
return -1;
}
......@@ -877,12 +861,10 @@ public:
int thrd_initialize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_initialize() != 0) {
LOG(ERROR) << "Failed thrd initialize engine, name: "
<< it->first;
LOG(ERROR) << "Failed thrd initialize engine, name: " << it->first;
return -1;
}
LOG(WARNING) << "Succ thrd initialize engine, name: "
<< it->first;
LOG(WARNING) << "Succ thrd initialize engine, name: " << it->first;
}
return 0;
}
......@@ -890,8 +872,7 @@ public:
int thrd_clear() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_clear() != 0) {
LOG(ERROR) << "Failed thrd clear engine, name: "
<< it->first;
LOG(ERROR) << "Failed thrd clear engine, name: " << it->first;
return -1;
}
}
......@@ -901,8 +882,7 @@ public:
int reload() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->reload() != 0) {
LOG(ERROR) << "Failed reload engine, name: "
<< it->first;
LOG(ERROR) << "Failed reload engine, name: " << it->first;
return -1;
}
}
......@@ -912,12 +892,10 @@ public:
int thrd_finalize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->thrd_finalize() != 0) {
LOG(ERROR) << "Failed thrd finalize engine, name: "
<< it->first;
LOG(ERROR) << "Failed thrd finalize engine, name: " << it->first;
return -1;
}
LOG(WARNING) << "Succ thrd finalize engine, name: "
<< it->first;
LOG(WARNING) << "Succ thrd finalize engine, name: " << it->first;
}
return 0;
}
......@@ -925,37 +903,36 @@ public:
int proc_finalize() {
for (auto it = _map.begin(); it != _map.end(); ++it) {
if (it->second->proc_finalize() != 0) {
LOG(ERROR) << "Failed proc finalize engine, name: "
<< it->first;
LOG(ERROR) << "Failed proc finalize engine, name: " << it->first;
return -1;
}
LOG(WARNING) << "Succ proc finalize engine, name: "
<< it->first;
LOG(WARNING) << "Succ proc finalize engine, name: " << it->first;
}
return 0;
}
// Inference interface
int infer(const char* model_name, const void* in, void* out, uint32_t batch_size = -1) {
int infer(const char* model_name,
const void* in,
void* out,
uint32_t batch_size = -1) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
LOG(WARNING) << "Cannot find engine in map, model name:" << model_name;
return -1;
}
return it->second->infer(in, out, batch_size);
}
template<typename T>
template <typename T>
T* get_core(const char* model_name) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
LOG(WARNING) << "Cannot find engine in map, model name:" << model_name;
return NULL;
}
auto infer_engine = dynamic_cast<DBReloadableInferEngine<T>*>(
it->second->default_engine());
auto infer_engine =
dynamic_cast<DBReloadableInferEngine<T>*>(it->second->default_engine());
if (infer_engine) {
return infer_engine->get_core();
}
......@@ -964,53 +941,49 @@ public:
}
// Versioned inference interface
int infer(const char* model_name, const void* in, void* out,
uint32_t batch_size, uint64_t version) {
int infer(const char* model_name,
const void* in,
void* out,
uint32_t batch_size,
uint64_t version) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
LOG(WARNING) << "Cannot find engine in map, model name:" << model_name;
return -1;
}
return it->second->infer(in, out, batch_size, version);
}
template<typename T>
template <typename T>
T* get_core(const char* model_name, uint64_t version) {
auto it = _map.find(model_name);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model_name;
LOG(WARNING) << "Cannot find engine in map, model name:" << model_name;
return NULL;
}
return it->second->get_core<T>(version);
}
int query_version(const std::string& model, uint64_t& version) {
int query_version(const std::string& model, uint64_t& version) { // NOLINT
auto it = _map.find(model);
if (it == _map.end()) {
LOG(WARNING) << "Cannot find engine in map, model name:"
<< model;
LOG(WARNING) << "Cannot find engine in map, model name:" << model;
return -1;
}
auto infer_engine = it->second->default_engine();
if (!infer_engine) {
LOG(WARNING) << "Cannot get default engine for model:"
<< model;
LOG(WARNING) << "Cannot get default engine for model:" << model;
return -1;
}
version = infer_engine->version();
LOG(INFO) << "Succ get version: " << version << " for model: "
<< model;
LOG(INFO) << "Succ get version: " << version << " for model: " << model;
return 0;
}
private:
private:
boost::unordered_map<std::string, VersionedInferEngine*> _map;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFER_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
enum DataType {
FLOAT32,
INT64
};
enum DataType { FLOAT32, INT64 };
class DataBuf {
public:
public:
DataBuf() : _data(NULL), _size(0), _owned(true) {}
DataBuf(size_t size)
explicit DataBuf(size_t size)
: _data(new char[size]), _size(size), _owned(true) {}
DataBuf(void* data, size_t size)
: _data(data), _size(size), _owned(false) {}
DataBuf(void* data, size_t size) : _data(data), _size(size), _owned(false) {}
DataBuf(void* data, size_t size, bool owned)
: _data(data), _size(size), _owned(owned) {}
void* data() const {
return _data;
}
void* data() const { return _data; }
size_t size() const {
return _size;
}
size_t size() const { return _size; }
void free() {
_size = 0;
if (_owned) {
delete[] (char*)_data;
delete[](reinterpret_cast<char*>(_data));
}
}
~DataBuf() {
free();
}
~DataBuf() { free(); }
private:
private:
void* _data;
size_t _size;
bool _owned;
......@@ -71,9 +75,7 @@ struct Tensor {
}
}
~Tensor() {
shape.clear();
}
~Tensor() { shape.clear(); }
size_t ele_byte() const {
if (type == INT64) {
......@@ -103,7 +105,8 @@ struct Tensor {
}
if (byte_size * ele_byte() != data.size()) {
LOG(ERROR) << "wrong data size: " << byte_size * ele_byte() << " vs. " << data.size();
LOG(ERROR) << "wrong data size: " << byte_size * ele_byte() << " vs. "
<< data.size();
return false;
}
......@@ -121,36 +124,25 @@ struct Tensor {
std::vector<int> shape;
DataBuf data;
DataType type;
std::vector<std::vector<size_t> > lod;
std::vector<std::vector<size_t>> lod;
};
class BatchTensor {
public:
public:
BatchTensor() {}
~BatchTensor() {
_features.clear();
}
~BatchTensor() { _features.clear(); }
BatchTensor(const BatchTensor& tv) {
_features.assign(
tv.features().begin(), tv.features().end());
_features.assign(tv.features().begin(), tv.features().end());
}
Tensor& operator[](int index) {
return _features[index];
}
Tensor& operator[](int index) { return _features[index]; }
const Tensor& operator[](int index) const {
return _features[index];
}
const Tensor& operator[](int index) const { return _features[index]; }
void push_back(const Tensor& tensor) {
_features.push_back(tensor);
}
void push_back(const Tensor& tensor) { _features.push_back(tensor); }
size_t count() const {
return _features.size();
}
size_t count() const { return _features.size(); }
size_t size() const {
// shape0 indicates batch_size
......@@ -160,20 +152,14 @@ public:
return _features[0].shape[0];
}
const std::vector<Tensor>& features() const {
return _features;
}
const std::vector<Tensor>& features() const { return _features; }
void clear() {
_features.clear();
}
void clear() { _features.clear(); }
private:
private:
std::vector<Tensor> _features;
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFER_DATA_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MANAGER_H
#include "common/inner_common.h"
#include "framework/workflow.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <utility>
#include "common/constant.h"
#include "common/inner_common.h"
#include "framework/service.h"
#include "framework/workflow.h"
namespace baidu {
namespace paddle_serving {
......@@ -14,15 +28,15 @@ using configure::WorkflowConf;
using configure::InferServiceConf;
class Workflow;
//class InferService;
//class ParallelInferService;
// class InferService;
// class ParallelInferService;
template<typename I>
template <typename I>
I* create_item_impl() {
return new (std::nothrow) I();
}
template<>
template <>
inline InferService* create_item_impl<InferService>() {
if (FLAGS_use_parallel_infer_service) {
return new (std::nothrow) ParallelInferService();
......@@ -32,7 +46,7 @@ inline InferService* create_item_impl<InferService>() {
}
class WorkflowManager {
public:
public:
static WorkflowManager& instance() {
static WorkflowManager mgr;
return mgr;
......@@ -41,12 +55,12 @@ public:
int initialize(const std::string path, const std::string file) {
WorkflowConf workflow_conf;
if (configure::read_proto_conf(path, file, &workflow_conf) != 0) {
LOG(ERROR) << "Failed load manager<" << Workflow::tag() << "> configure from " << path << "/" << file;
LOG(ERROR) << "Failed load manager<" << Workflow::tag()
<< "> configure from " << path << "/" << file;
return -1;
}
try {
uint32_t item_size = workflow_conf.workflows_size();
for (uint32_t ii = 0; ii < item_size; ii++) {
std::string name = workflow_conf.workflows(ii).name();
......@@ -56,39 +70,31 @@ public:
return -1;
}
if (item->init(workflow_conf.workflows(ii)) != 0) {
LOG(ERROR)
<< "Failed init item: " << name << " at:"
<< ii << "!";
LOG(ERROR) << "Failed init item: " << name << " at:" << ii << "!";
return -1;
}
std::pair<
typename boost::unordered_map<std::string, Workflow*>::iterator, bool>
typename boost::unordered_map<std::string, Workflow*>::iterator,
bool>
r = _item_map.insert(std::make_pair(name, item));
if (!r.second) {
LOG(ERROR)
<< "Failed insert item:" << name << " at:"
<< ii << "!";
LOG(ERROR) << "Failed insert item:" << name << " at:" << ii << "!";
return -1;
}
LOG(INFO)
<< "Succ init item:" << name << " from conf:"
<< path << "/" << file << ", at:" << ii << "!";
LOG(INFO) << "Succ init item:" << name << " from conf:" << path << "/"
<< file << ", at:" << ii << "!";
}
} catch (...) {
LOG(ERROR)
<< "Config[" << path << "/" << file << "] format "
LOG(ERROR) << "Config[" << path << "/" << file << "] format "
<< "invalid, load failed";
return -1;
}
return 0;
}
Workflow* create_item() {
return create_item_impl<Workflow>();
}
Workflow* create_item() { return create_item_impl<Workflow>(); }
Workflow* item(const std::string& name) {
typename boost::unordered_map<std::string, Workflow*>::iterator it;
......@@ -113,8 +119,8 @@ public:
int reload() {
int ret = 0;
typename boost::unordered_map<std::string, Workflow*>::iterator it
= _item_map.begin();
typename boost::unordered_map<std::string, Workflow*>::iterator it =
_item_map.begin();
for (; it != _item_map.end(); ++it) {
if (it->second->reload() != 0) {
LOG(WARNING) << "failed reload item: " << it->first << "!";
......@@ -122,25 +128,22 @@ public:
}
}
LOG(INFO) << "Finish reload "
<< _item_map.size()
<< " " << Workflow::tag() << "(s)";
LOG(INFO) << "Finish reload " << _item_map.size() << " " << Workflow::tag()
<< "(s)";
return ret;
}
int finalize() {
return 0;
}
int finalize() { return 0; }
private:
private:
WorkflowManager() {}
private:
private:
boost::unordered_map<std::string, Workflow*> _item_map;
};
class InferServiceManager {
public:
public:
static InferServiceManager& instance() {
static InferServiceManager mgr;
return mgr;
......@@ -149,54 +152,47 @@ public:
int initialize(const std::string path, const std::string file) {
InferServiceConf infer_service_conf;
if (configure::read_proto_conf(path, file, &infer_service_conf) != 0) {
LOG(ERROR) << "Failed load manager<" << InferService::tag() << "> configure!";
LOG(ERROR) << "Failed load manager<" << InferService::tag()
<< "> configure!";
return -1;
}
try {
uint32_t item_size = infer_service_conf.services_size();
for (uint32_t ii = 0; ii < item_size; ii++) {
std::string name = infer_service_conf.services(ii).name();
InferService* item = new (std::nothrow) InferService();
if (item == NULL) {
LOG(ERROR) << "Failed create " << InferService::tag() << " for: " << name;
LOG(ERROR) << "Failed create " << InferService::tag()
<< " for: " << name;
return -1;
}
if (item->init(infer_service_conf.services(ii)) != 0) {
LOG(ERROR)
<< "Failed init item: " << name << " at:"
<< ii << "!";
LOG(ERROR) << "Failed init item: " << name << " at:" << ii << "!";
return -1;
}
std::pair<
typename boost::unordered_map<std::string, InferService*>::iterator, bool>
typename boost::unordered_map<std::string, InferService*>::iterator,
bool>
r = _item_map.insert(std::make_pair(name, item));
if (!r.second) {
LOG(ERROR)
<< "Failed insert item:" << name << " at:"
<< ii << "!";
LOG(ERROR) << "Failed insert item:" << name << " at:" << ii << "!";
return -1;
}
LOG(INFO)
<< "Succ init item:" << name << " from conf:"
<< path << "/" << file << ", at:" << ii << "!";
LOG(INFO) << "Succ init item:" << name << " from conf:" << path << "/"
<< file << ", at:" << ii << "!";
}
} catch (...) {
LOG(ERROR)
<< "Config[" << path << "/" << file << "] format "
LOG(ERROR) << "Config[" << path << "/" << file << "] format "
<< "invalid, load failed";
return -1;
}
return 0;
}
InferService* create_item() {
return create_item_impl<InferService>();
}
InferService* create_item() { return create_item_impl<InferService>(); }
InferService* item(const std::string& name) {
typename boost::unordered_map<std::string, InferService*>::iterator it;
......@@ -221,8 +217,8 @@ public:
int reload() {
int ret = 0;
typename boost::unordered_map<std::string, InferService*>::iterator it
= _item_map.begin();
typename boost::unordered_map<std::string, InferService*>::iterator it =
_item_map.begin();
for (; it != _item_map.end(); ++it) {
if (it->second->reload() != 0) {
LOG(WARNING) << "failed reload item: " << it->first << "!";
......@@ -230,25 +226,20 @@ public:
}
}
LOG(INFO) << "Finish reload "
<< _item_map.size()
<< " " << InferService::tag() << "(s)";
LOG(INFO) << "Finish reload " << _item_map.size() << " "
<< InferService::tag() << "(s)";
return ret;
}
int finalize() {
return 0;
}
int finalize() { return 0; }
private:
private:
InferServiceManager() {}
private:
private:
boost::unordered_map<std::string, InferService*> _item_map;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "common/inner_common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/memory.h"
#include "common/inner_common.h"
namespace baidu {
namespace paddle_serving {
......@@ -37,8 +51,7 @@ int MempoolWrapper::thread_initialize() {
}
int MempoolWrapper::thread_clear() {
im::Mempool* p_mempool = (im::Mempool*) THREAD_GETSPECIFIC(
_bspec_key);
im::Mempool* p_mempool = (im::Mempool*)THREAD_GETSPECIFIC(_bspec_key);
if (p_mempool) {
p_mempool->release_block();
_region.reset();
......@@ -48,8 +61,7 @@ int MempoolWrapper::thread_clear() {
}
void* MempoolWrapper::malloc(size_t size) {
im::Mempool* p_mempool = (im::Mempool*) THREAD_GETSPECIFIC(
_bspec_key);
im::Mempool* p_mempool = (im::Mempool*)THREAD_GETSPECIFIC(_bspec_key);
if (!p_mempool) {
LOG(WARNING) << "Cannot malloc memory:" << size
<< ", since mempool is not thread initialized";
......@@ -58,6 +70,6 @@ void* MempoolWrapper::malloc(size_t size) {
return p_mempool->malloc(size);
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_MEMORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_MEMORY_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "common/inner_common.h"
#include "mempool/mempool.h"
......@@ -9,7 +22,7 @@ namespace paddle_serving {
namespace predictor {
class MempoolWrapper {
public:
public:
MempoolWrapper() {}
static MempoolWrapper& instance() {
......@@ -25,13 +38,11 @@ public:
void* malloc(size_t size);
private:
private:
im::fugue::memory::Region _region;
THREAD_KEY_T _bspec_key;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#pragma once
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include "common/inner_common.h"
namespace baidu {
......@@ -7,14 +22,14 @@ namespace paddle_serving {
namespace predictor {
class IMerger {
public:
virtual bool merge(const google::protobuf::Message*, google::protobuf::Message*) = 0;
public:
virtual bool merge(const google::protobuf::Message*,
google::protobuf::Message*) = 0;
};
class DefaultMerger : public IMerger {
public:
bool merge(
const google::protobuf::Message* s, google::protobuf::Message* d) {
public:
bool merge(const google::protobuf::Message* s, google::protobuf::Message* d) {
if (!s || !d) {
return false;
}
......@@ -24,9 +39,9 @@ public:
}
};
template<typename T>
template <typename T>
class Singleton {
public:
public:
static T* instance() {
static T ins;
return &ins;
......@@ -34,7 +49,7 @@ public:
};
class MergerManager {
public:
public:
typedef IMerger MergerT;
static MergerManager& instance() {
......@@ -51,9 +66,8 @@ public:
return true;
}
bool get(const std::string& name, MergerT*& merger) {
std::map<std::string, MergerT*>::iterator iter =
_mergers.find(name);
bool get(const std::string& name, MergerT*& merger) { // NOLINT
std::map<std::string, MergerT*>::iterator iter = _mergers.find(name);
if (iter == _mergers.end()) {
return false;
}
......@@ -61,35 +75,33 @@ public:
return true;
}
private:
MergerManager() {
set("default", Singleton<DefaultMerger>::instance());
}
private:
MergerManager() { set("default", Singleton<DefaultMerger>::instance()); }
private:
private:
std::map<std::string, MergerT*> _mergers;
};
#define DECLARE_MERGER(M) \
static bool regist_self() {\
if (!baidu::paddle_serving::predictor::MergerManager::instance().set(\
#M, baidu::paddle_serving::predictor::Singleton<M>::instance())) {\
LOG(ERROR) << "Failed regist merger: " << #M;\
return false;\
}\
LOG(INFO) << "Succ regist merger: " << #M;\
return true;\
static bool regist_self() { \
if (!baidu::paddle_serving::predictor::MergerManager::instance().set( \
#M, baidu::paddle_serving::predictor::Singleton<M>::instance())) { \
LOG(ERROR) << "Failed regist merger: " << #M; \
return false; \
} \
LOG(INFO) << "Succ regist merger: " << #M; \
return true; \
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define PDS_STR_CAT_I(a, b) a##b
#define DEFINE_MERGER(M)\
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void)\
{\
M::regist_self();\
}
#define DEFINE_MERGER(M) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
M::regist_self(); \
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/op_repository.h"
#include <string>
#include "op/op.h"
namespace baidu {
......@@ -39,8 +54,8 @@ void OpRepository::return_op(const std::string& op_type, Op* op) {
if (iter != _repository.end()) {
iter->second->return_op(op);
} else {
LOG(ERROR) << "Try to return unknown op[" << op << "], op_type["
<< op_type << "].";
LOG(ERROR) << "Try to return unknown op[" << op << "], op_type[" << op_type
<< "].";
}
}
......
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_REPOSITORY_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_REPOSITORY_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
namespace baidu {
......@@ -8,22 +21,21 @@ namespace paddle_serving {
namespace predictor {
#define REGISTER_OP(op) \
::baidu::paddle_serving::predictor::OpRepository::instance().regist_op<op>(#op)
::baidu::paddle_serving::predictor::OpRepository::instance().regist_op<op>( \
#op)
class Op;
class Factory {
public:
public:
virtual Op* get_op() = 0;
virtual void return_op(Op* op) = 0;
};
template<typename OP_TYPE>
template <typename OP_TYPE>
class OpFactory : public Factory {
public:
Op* get_op() {
return butil::get_object<OP_TYPE>();
}
public:
Op* get_op() { return butil::get_object<OP_TYPE>(); }
void return_op(Op* op) {
butil::return_object<OP_TYPE>(dynamic_cast<OP_TYPE*>(op));
......@@ -36,7 +48,7 @@ public:
};
class OpRepository {
public:
public:
typedef boost::unordered_map<std::string, Factory*> ManagerMap;
OpRepository() {}
......@@ -47,7 +59,7 @@ public:
return repo;
}
template<typename OP_TYPE>
template <typename OP_TYPE>
void regist_op(std::string op_type) {
_repository[op_type] = &OpFactory<OP_TYPE>::instance();
LOG(INFO) << "Succ regist op: " << op_type << "!";
......@@ -59,12 +71,10 @@ public:
void return_op(const std::string& op_type, Op* op);
private:
private:
ManagerMap _repository;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "predictor_metric.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "predictor/framework/predictor_metric.h"
#include "butil/memory/singleton.h"
namespace baidu {
......
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
#include <bvar/bvar.h> // bvar
#include <butil/scoped_lock.h> // BAIDU_SCOPED_LOCK
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <butil/containers/flat_map.h> // FlatMap
#include <butil/memory/singleton.h> // DefaultSingletonTraits
#include <butil/scoped_lock.h> // BAIDU_SCOPED_LOCK
#include <bvar/bvar.h> // bvar
#include <string>
namespace baidu {
namespace paddle_serving {
namespace predictor {
static const std::string WORKFLOW_METRIC_PREFIX = "workflow_";
static const std::string STAGE_METRIC_PREFIX = "stage_";
static const std::string OP_METRIC_PREFIX = "op_";
static const std::string NAME_DELIMITER = "_";
static const char* WORKFLOW_METRIC_PREFIX = "workflow_";
static const char* STAGE_METRIC_PREFIX = "stage_";
static const char* OP_METRIC_PREFIX = "op_";
static const char* NAME_DELIMITER = "_";
typedef ::bvar::Window<::bvar::Adder<int> > AdderWindow;
typedef ::bvar::Window<::bvar::Adder<int>> AdderWindow;
typedef ::bvar::Window<::bvar::IntRecorder> RecorderWindow;
class AdderWindowMetric {
public:
AdderWindowMetric() :
sum_window(&sum, ::bvar::FLAGS_bvar_dump_interval) {
}
public:
AdderWindowMetric() : sum_window(&sum, ::bvar::FLAGS_bvar_dump_interval) {}
AdderWindowMetric(const std::string& name) :
sum_window(name + "_sum_window", &sum, ::bvar::FLAGS_bvar_dump_interval) {
}
explicit AdderWindowMetric(const std::string& name)
: sum_window(
name + "_sum_window", &sum, ::bvar::FLAGS_bvar_dump_interval) {}
inline AdderWindowMetric& operator<<(int count) {
sum << count;
return *this;
}
public:
public:
::bvar::Adder<int> sum;
AdderWindow sum_window;
};
static float g_get_rate(void* arg);
class RateBaseMetric {
public:
RateBaseMetric(const std::string& name) :
rate_value(name + "_rate", g_get_rate, this) {
}
public:
explicit RateBaseMetric(const std::string& name)
: rate_value(name + "_rate", g_get_rate, this) {}
void update_lhs(int count) { lhs.sum << count; }
void update_rhs(int count) { rhs.sum << count; }
public:
public:
::bvar::PassiveStatus<float> rate_value;
AdderWindowMetric lhs;
AdderWindowMetric rhs;
......@@ -60,27 +70,25 @@ static float g_get_rate(void* arg) {
if (rate_metric->rhs.sum_window.get_value() <= 0) {
return 0;
}
return rate_metric->lhs.sum_window.get_value() * 100
/ (float) rate_metric->rhs.sum_window.get_value();
return rate_metric->lhs.sum_window.get_value() * 100 /
static_cast<float>(rate_metric->rhs.sum_window.get_value());
}
// 计算平均值时取整
class AvgWindowMetric {
public:
AvgWindowMetric() :
avg_window(&avg, ::bvar::FLAGS_bvar_dump_interval) {
}
public:
AvgWindowMetric() : avg_window(&avg, ::bvar::FLAGS_bvar_dump_interval) {}
AvgWindowMetric(const std::string& name) :
avg_window(name + "_avg_window", &avg, ::bvar::FLAGS_bvar_dump_interval) {
}
explicit AvgWindowMetric(const std::string& name)
: avg_window(
name + "_avg_window", &avg, ::bvar::FLAGS_bvar_dump_interval) {}
inline AvgWindowMetric& operator<<(int64_t value) {
avg << value;
return *this;
}
public:
public:
::bvar::IntRecorder avg;
RecorderWindow avg_window;
};
......@@ -88,17 +96,16 @@ public:
// 计算平均值时不取整
static double g_get_double_avg(void* arg);
class AvgDoubleWindowMetric {
public:
AvgDoubleWindowMetric(const std::string& name) :
avg_value(name + "_avg_double_window", g_get_double_avg, this) {
}
public:
explicit AvgDoubleWindowMetric(const std::string& name)
: avg_value(name + "_avg_double_window", g_get_double_avg, this) {}
inline AvgDoubleWindowMetric& operator<<(int64_t value) {
recorder << value;
return *this;
}
public:
public:
::bvar::PassiveStatus<double> avg_value;
AvgWindowMetric recorder;
};
......@@ -109,36 +116,36 @@ static double g_get_double_avg(void* arg) {
}
class PredictorMetric {
public:
public:
static PredictorMetric* GetInstance();
~PredictorMetric() {
for (::butil::FlatMap<std::string, bvar::LatencyRecorder*>::iterator iter
= latency_recorder_map.begin();
for (::butil::FlatMap<std::string, bvar::LatencyRecorder*>::iterator iter =
latency_recorder_map.begin();
iter != latency_recorder_map.end();
++iter) {
delete iter->second;
}
for (::butil::FlatMap<std::string, AdderWindowMetric*>::iterator iter
= adder_window_map.begin();
for (::butil::FlatMap<std::string, AdderWindowMetric*>::iterator iter =
adder_window_map.begin();
iter != adder_window_map.end();
++iter) {
delete iter->second;
}
for (::butil::FlatMap<std::string, AvgWindowMetric*>::iterator iter
= avg_window_map.begin();
for (::butil::FlatMap<std::string, AvgWindowMetric*>::iterator iter =
avg_window_map.begin();
iter != avg_window_map.end();
++iter) {
delete iter->second;
}
for (::butil::FlatMap<std::string, AvgDoubleWindowMetric*>::iterator iter
= avg_double_window_map.begin();
for (::butil::FlatMap<std::string, AvgDoubleWindowMetric*>::iterator iter =
avg_double_window_map.begin();
iter != avg_double_window_map.end();
++iter) {
delete iter->second;
}
for (::butil::FlatMap<std::string, RateBaseMetric*>::iterator iter
= rate_map.begin();
for (::butil::FlatMap<std::string, RateBaseMetric*>::iterator iter =
rate_map.begin();
iter != rate_map.end();
++iter) {
delete iter->second;
......@@ -150,7 +157,8 @@ public:
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist latency metric[" << metric_name << "].";
if (latency_recorder_map.seek(metric_name) == NULL) {
bvar::LatencyRecorder* metric = new (std::nothrow) bvar::LatencyRecorder(metric_name);
bvar::LatencyRecorder* metric =
new (std::nothrow) bvar::LatencyRecorder(metric_name);
latency_recorder_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist latency metric[" << metric_name << "].";
}
......@@ -162,9 +170,11 @@ public:
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist adder window metric[" << metric_name << "].";
if (adder_window_map.seek(metric_name) == NULL) {
AdderWindowMetric* metric = new (std::nothrow) AdderWindowMetric(metric_name);
AdderWindowMetric* metric =
new (std::nothrow) AdderWindowMetric(metric_name);
adder_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist adder window metric[" << metric_name << "].";
LOG(INFO) << "succ to regist adder window metric[" << metric_name
<< "].";
}
}
}
......@@ -174,7 +184,8 @@ public:
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist avg window metric[" << metric_name << "].";
if (avg_window_map.seek(metric_name) == NULL) {
AvgWindowMetric* metric = new (std::nothrow) AvgWindowMetric(metric_name);
AvgWindowMetric* metric =
new (std::nothrow) AvgWindowMetric(metric_name);
avg_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist avg window metric[" << metric_name << "].";
}
......@@ -184,11 +195,14 @@ public:
void regist_avg_double_window_metric(const std::string& metric_name) {
{
BAIDU_SCOPED_LOCK(_mutex);
LOG(INFO) << "try to regist avg double window metric[" << metric_name << "].";
LOG(INFO) << "try to regist avg double window metric[" << metric_name
<< "].";
if (avg_double_window_map.seek(metric_name) == NULL) {
AvgDoubleWindowMetric* metric = new (std::nothrow) AvgDoubleWindowMetric(metric_name);
AvgDoubleWindowMetric* metric =
new (std::nothrow) AvgDoubleWindowMetric(metric_name);
avg_double_window_map.insert(metric_name, metric);
LOG(INFO) << "succ to regist avg double window metric[" << metric_name << "].";
LOG(INFO) << "succ to regist avg double window metric[" << metric_name
<< "].";
}
}
}
......@@ -205,7 +219,8 @@ public:
}
}
inline void update_latency_metric(const std::string& metric_name, int64_t latency) {
inline void update_latency_metric(const std::string& metric_name,
int64_t latency) {
bvar::LatencyRecorder** metric = latency_recorder_map.seek(metric_name);
if (metric != NULL) {
**metric << latency;
......@@ -214,7 +229,8 @@ public:
}
}
inline void update_adder_window_metric(const std::string& metric_name, int count) {
inline void update_adder_window_metric(const std::string& metric_name,
int count) {
AdderWindowMetric** metric = adder_window_map.seek(metric_name);
if (metric != NULL) {
**metric << count;
......@@ -223,7 +239,8 @@ public:
}
}
inline void update_avg_window_metric(const std::string& metric_name, int64_t value) {
inline void update_avg_window_metric(const std::string& metric_name,
int64_t value) {
AvgWindowMetric** metric = avg_window_map.seek(metric_name);
if (metric != NULL) {
**metric << value;
......@@ -232,7 +249,8 @@ public:
}
}
inline void update_avg_double_window_metric(const std::string& metric_name, int64_t value) {
inline void update_avg_double_window_metric(const std::string& metric_name,
int64_t value) {
AvgDoubleWindowMetric** metric = avg_double_window_map.seek(metric_name);
if (metric != NULL) {
**metric << value;
......@@ -259,9 +277,8 @@ public:
}
}
private:
PredictorMetric() :
bucket_count(300) {
private:
PredictorMetric() : bucket_count(300) {
latency_recorder_map.init(bucket_count);
adder_window_map.init(bucket_count);
avg_window_map.init(bucket_count);
......@@ -269,7 +286,7 @@ private:
rate_map.init(bucket_count);
}
private:
private:
const size_t bucket_count;
::butil::FlatMap<std::string, bvar::LatencyRecorder*> latency_recorder_map;
::butil::FlatMap<std::string, AdderWindowMetric*> adder_window_map;
......@@ -285,6 +302,3 @@ private:
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_FRAMEWORK_PREDICTOR_METRIC_H
#include "common/inner_common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/resource.h"
#include <string>
#include "common/inner_common.h"
#include "framework/infer.h"
namespace baidu {
......@@ -21,19 +36,14 @@ DynamicResource::DynamicResource() {}
DynamicResource::~DynamicResource() {}
int DynamicResource::initialize() {
return 0;
}
int DynamicResource::initialize() { return 0; }
int DynamicResource::clear() {
return 0;
}
int DynamicResource::clear() { return 0; }
int Resource::initialize(const std::string& path, const std::string& file) {
ResourceConf resource_conf;
if (configure::read_proto_conf(path, file, &resource_conf) != 0) {
LOG(ERROR) << "Failed initialize resource from: "
<< path << "/" << file;
LOG(ERROR) << "Failed initialize resource from: " << path << "/" << file;
return -1;
}
......@@ -48,14 +58,14 @@ int Resource::initialize(const std::string& path, const std::string& file) {
int err = 0;
std::string model_toolkit_path = resource_conf.model_toolkit_path();
if (err != 0) {
LOG(ERROR) << "read model_toolkit_path failed, path["
<< path << "], file[" << file << "]";
LOG(ERROR) << "read model_toolkit_path failed, path[" << path
<< "], file[" << file << "]";
return -1;
}
std::string model_toolkit_file = resource_conf.model_toolkit_file();
if (err != 0) {
LOG(ERROR) << "read model_toolkit_file failed, path["
<< path << "], file[" << file << "]";
LOG(ERROR) << "read model_toolkit_file failed, path[" << path
<< "], file[" << file << "]";
return -1;
}
if (InferManager::instance().proc_initialize(
......@@ -83,12 +93,14 @@ int Resource::thread_initialize() {
LOG(WARNING) << "Successfully thread initialized mempool wrapper";
// infer manager
if (FLAGS_enable_model_toolkit && InferManager::instance().thrd_initialize() != 0) {
if (FLAGS_enable_model_toolkit &&
InferManager::instance().thrd_initialize() != 0) {
LOG(ERROR) << "Failed thrd initialized infer manager";
return -1;
}
DynamicResource* p_dynamic_resource = (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
DynamicResource* p_dynamic_resource =
reinterpret_cast<DynamicResource*>(THREAD_GETSPECIFIC(_tls_bspec_key));
if (p_dynamic_resource == NULL) {
p_dynamic_resource = new (std::nothrow) DynamicResource;
if (p_dynamic_resource == NULL) {
......@@ -108,12 +120,13 @@ int Resource::thread_initialize() {
p_dynamic_resource = NULL;
return -1;
}
}
#if 0
LOG(INFO) << "Successfully thread initialized dynamic resource";
#else
LOG(INFO) << bthread_self() << ": Successfully thread initialized dynamic resource " << p_dynamic_resource;
LOG(INFO) << bthread_self()
<< ": Successfully thread initialized dynamic resource "
<< p_dynamic_resource;
#endif
return 0;
......@@ -127,17 +140,22 @@ int Resource::thread_clear() {
}
// infer manager
if (FLAGS_enable_model_toolkit && InferManager::instance().thrd_clear() != 0) {
if (FLAGS_enable_model_toolkit &&
InferManager::instance().thrd_clear() != 0) {
LOG(ERROR) << "Failed thrd clear infer manager";
return -1;
}
DynamicResource* p_dynamic_resource = (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
DynamicResource* p_dynamic_resource =
reinterpret_cast<DynamicResource*>(THREAD_GETSPECIFIC(_tls_bspec_key));
if (p_dynamic_resource == NULL) {
#if 0
LOG(ERROR) << "tls dynamic resource shouldn't be null after thread_initialize";
LOG(ERROR) << "tls dynamic resource shouldn't be null after "
<< "thread_initialize";
#else
LOG(ERROR) << bthread_self() << ": tls dynamic resource shouldn't be null after thread_initialize";
LOG(ERROR)
<< bthread_self()
<< ": tls dynamic resource shouldn't be null after thread_initialize";
#endif
return -1;
}
......@@ -162,7 +180,8 @@ int Resource::reload() {
}
int Resource::finalize() {
if (FLAGS_enable_model_toolkit && InferManager::instance().proc_finalize() != 0) {
if (FLAGS_enable_model_toolkit &&
InferManager::instance().proc_finalize() != 0) {
LOG(ERROR) << "Failed proc finalize infer manager";
return -1;
}
......@@ -172,6 +191,6 @@ int Resource::finalize() {
return 0;
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_RESOURCE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_RESOURCE_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
#include "framework/memory.h"
......@@ -20,8 +33,7 @@ struct DynamicResource {
};
class Resource {
public:
public:
Resource() {}
~Resource() { finalize(); }
......@@ -42,20 +54,16 @@ public:
int finalize();
DynamicResource* get_dynamic_resource() {
return (DynamicResource*) THREAD_GETSPECIFIC(_tls_bspec_key);
return reinterpret_cast<DynamicResource*>(
THREAD_GETSPECIFIC(_tls_bspec_key));
}
private:
int thread_finalize() {
return 0;
}
private:
int thread_finalize() { return 0; }
THREAD_KEY_T _tls_bspec_key;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/server.h"
#include <brpc/policy/nova_pbrpc_protocol.h> // NovaServiceAdaptor
#include <brpc/policy/public_pbrpc_protocol.h> // PublicPbrpcServiceAdaptor
#include <brpc/policy/nshead_mcpack_protocol.h> // NsheadMcpackAdaptor
#include <brpc/policy/public_pbrpc_protocol.h> // PublicPbrpcServiceAdaptor
#include <string>
#include <utility>
#include "common/inner_common.h"
#include "framework/server.h"
#include "framework/service_manager.h"
#include "framework/resource.h"
#include "framework/manager.h"
#include "framework/resource.h"
#include "framework/service_manager.h"
namespace baidu {
namespace paddle_serving {
......@@ -29,24 +45,22 @@ ServerManager::ServerManager() {
}
int ServerManager::add_service_by_format(const std::string& format) {
Service* service =
FormatServiceManager::instance().get_service(format);
Service* service = FormatServiceManager::instance().get_service(format);
if (service == NULL) {
LOG(ERROR) << "Not found service by format:" << format << "!";
return -1;
}
if (_format_services.find(format) != _format_services.end()) {
LOG(ERROR) << "Cannot insert duplicated service by format:"
<< format << "!";
LOG(ERROR) << "Cannot insert duplicated service by format:" << format
<< "!";
return -1;
}
std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> it
= _format_services.insert(std::make_pair(format, service));
std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> it =
_format_services.insert(std::make_pair(format, service));
if (!it.second) {
LOG(ERROR) << "Failed insert service by format:"
<< format << "!";
LOG(ERROR) << "Failed insert service by format:" << format << "!";
return -1;
}
......@@ -60,18 +74,15 @@ int ServerManager::start_and_wait() {
}
boost::unordered_map<std::string, Service*>::iterator it;
for (it = _format_services.begin(); it != _format_services.end();
it++) {
if (_server.AddService(it->second, brpc::SERVER_DOESNT_OWN_SERVICE)
!= 0) {
LOG(ERROR) << "Failed to add service of format:"
<< it->first << "!";
for (it = _format_services.begin(); it != _format_services.end(); it++) {
if (_server.AddService(it->second, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
LOG(ERROR) << "Failed to add service of format:" << it->first << "!";
return -1;
}
}
if (_server.Start(FLAGS_port, &_options) != 0) {
LOG(ERROR) << "Failed to start Paddle Inference Server" ;
LOG(ERROR) << "Failed to start Paddle Inference Server";
return -1;
}
_server.RunUntilAskedToQuit();
......@@ -88,24 +99,26 @@ void ServerManager::_set_server_option_by_protocol(
const ::butil::StringPiece& protocol_type) {
std::string enabled_protocols = FLAGS_enable_protocol_list;
if (_compare_string_piece_without_case(protocol_type, "nova_pbrpc")) {
_options.nshead_service = new ::brpc::policy::NovaServiceAdaptor;;
} else if (_compare_string_piece_without_case(protocol_type, "public_pbrpc")) {
_options.nshead_service = new ::brpc::policy::NovaServiceAdaptor;
} else if (_compare_string_piece_without_case(protocol_type,
"public_pbrpc")) {
_options.nshead_service = new ::brpc::policy::PublicPbrpcServiceAdaptor;
} else if (_compare_string_piece_without_case(protocol_type, "nshead_mcpack")) {
} else if (_compare_string_piece_without_case(protocol_type,
"nshead_mcpack")) {
_options.nshead_service = new ::brpc::policy::NsheadMcpackAdaptor;
} else {
LOG(ERROR) << "fail to set nshead protocol, protocol_type[" << protocol_type << "].";
LOG(ERROR) << "fail to set nshead protocol, protocol_type[" << protocol_type
<< "].";
return;
}
_options.enabled_protocols = enabled_protocols;
LOG(INFO) << "success to set nshead protocol, protocol_type[" << protocol_type << "].";
LOG(INFO) << "success to set nshead protocol, protocol_type[" << protocol_type
<< "].";
}
int ServerManager::_start_reloader() {
int ret = THREAD_CREATE(
&_reload_thread, NULL,
ServerManager::_reload_worker,
NULL);
int ret =
THREAD_CREATE(&_reload_thread, NULL, ServerManager::_reload_worker, NULL);
if (ret != 0) {
LOG(ERROR) << "Failed start reload thread, ret:" << ret;
......@@ -140,6 +153,6 @@ void* ServerManager::_reload_worker(void* args) {
return NULL;
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_SERVER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_SERVER_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
namespace baidu {
......@@ -8,7 +21,7 @@ namespace paddle_serving {
namespace predictor {
class ServerManager {
public:
public:
typedef google::protobuf::Service Service;
ServerManager();
......@@ -17,29 +30,26 @@ public:
return server;
}
static bool reload_starting() {
return _s_reload_starting;
}
static bool reload_starting() { return _s_reload_starting; }
static void stop_reloader() {
_s_reload_starting = false;
}
static void stop_reloader() { _s_reload_starting = false; }
int add_service_by_format(const std::string& format);
int start_and_wait();
private:
private:
int _start_reloader();
int _wait_reloader();
static void* _reload_worker(void* args);
bool _compare_string_piece_without_case(
const butil::StringPiece& s1, const char* s2);
bool _compare_string_piece_without_case(const butil::StringPiece& s1,
const char* s2);
void _set_server_option_by_protocol(const ::butil::StringPiece& protocol_type);
void _set_server_option_by_protocol(
const ::butil::StringPiece& protocol_type);
brpc::ServerOptions _options;
brpc::Server _server;
......@@ -48,8 +58,6 @@ private:
static volatile bool _s_reload_starting;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "common/inner_common.h"
#include "framework/channel.h"
#include "common/constant.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/service.h"
#include <butil/time.h> // butil::Timer
#include "framework/server.h"
#include <list>
#include <string>
#include <vector>
#include "common/constant.h"
#include "common/inner_common.h"
#include "framework/channel.h"
#include "framework/dag_view.h"
#include "framework/manager.h"
#include "framework/resource.h"
#include "framework/predictor_metric.h" // PredictorMetric
#include "framework/resource.h"
#include "framework/server.h"
namespace baidu {
namespace paddle_serving {
......@@ -24,15 +41,14 @@ int InferService::init(const configure::InferService& conf) {
LOG(ERROR) << "Failed get merger: " << merger;
return ERR_INTERNAL_FAILURE;
} else {
LOG(WARNING) << "Succ get merger: " << merger <<
" for service: " << _infer_service_format;
LOG(WARNING) << "Succ get merger: " << merger
<< " for service: " << _infer_service_format;
}
ServerManager& svr_mgr = ServerManager::instance();
if (svr_mgr.add_service_by_format(_infer_service_format) != 0) {
LOG(ERROR)
<< "Not found service by format name:"
<< _infer_service_format << "!";
LOG(ERROR) << "Not found service by format name:" << _infer_service_format
<< "!";
return ERR_INTERNAL_FAILURE;
}
......@@ -43,24 +59,20 @@ int InferService::init(const configure::InferService& conf) {
if (_enable_map_request_to_workflow) {
if (_request_to_workflow_map.init(
MAX_WORKFLOW_NUM_IN_ONE_SERVICE/*load_factor=80*/) != 0) {
LOG(ERROR)
<< "init request to workflow map failed, bucket_count["
MAX_WORKFLOW_NUM_IN_ONE_SERVICE /*load_factor=80*/) != 0) {
LOG(ERROR) << "init request to workflow map failed, bucket_count["
<< MAX_WORKFLOW_NUM_IN_ONE_SERVICE << "].";
return ERR_INTERNAL_FAILURE;
}
int err = 0;
_request_field_key = conf.request_field_key().c_str();
if (_request_field_key == "") {
LOG(ERROR)
<< "read request_field_key failed, request_field_key["
LOG(ERROR) << "read request_field_key failed, request_field_key["
<< _request_field_key << "].";
return ERR_INTERNAL_FAILURE;
}
LOG(INFO)
<< "service[" << _infer_service_format
<< "], request_field_key["
LOG(INFO) << "service[" << _infer_service_format << "], request_field_key["
<< _request_field_key << "].";
uint32_t value_mapped_workflows_size = conf.value_mapped_workflows_size();
for (uint32_t fi = 0; fi < value_mapped_workflows_size; fi++) {
......@@ -71,38 +83,34 @@ int InferService::init(const configure::InferService& conf) {
uint32_t tsize = tokens.size();
for (uint32_t ti = 0; ti < tsize; ++ti) {
boost::trim_if(tokens[ti], boost::is_any_of(" "));
Workflow* workflow =
WorkflowManager::instance().item(tokens[ti]);
Workflow* workflow = WorkflowManager::instance().item(tokens[ti]);
if (workflow == NULL) {
LOG(ERROR)
<< "Failed get workflow by name:"
<< tokens[ti] << ", ti: " << ti;
LOG(ERROR) << "Failed get workflow by name:" << tokens[ti]
<< ", ti: " << ti;
return ERR_INTERNAL_FAILURE;
}
workflow->regist_metric(full_name());
workflows.push_back(workflow);
}
const std::string& request_field_value = conf.value_mapped_workflows(fi).request_field_value();
if (_request_to_workflow_map.insert(request_field_value, workflows) == NULL) {
LOG(ERROR)
<< "insert [" << request_field_value << ","
<< list << "] to _request_to_workflow_map failed.";
const std::string& request_field_value =
conf.value_mapped_workflows(fi).request_field_value();
if (_request_to_workflow_map.insert(request_field_value, workflows) ==
NULL) {
LOG(ERROR) << "insert [" << request_field_value << "," << list
<< "] to _request_to_workflow_map failed.";
return ERR_INTERNAL_FAILURE;
}
LOG(INFO) << "workflow[" << list
<< "], request_field_value[" << request_field_value << "].";
LOG(INFO) << "workflow[" << list << "], request_field_value["
<< request_field_value << "].";
}
} else {
uint32_t flow_size = conf.workflows_size();
for (uint32_t fi = 0; fi < flow_size; fi++) {
const std::string& workflow_name = conf.workflows(fi);
Workflow* workflow =
WorkflowManager::instance().item(workflow_name);
Workflow* workflow = WorkflowManager::instance().item(workflow_name);
if (workflow == NULL) {
LOG(ERROR)
<< "Failed get workflow by name:"
<< workflow_name;
LOG(ERROR) << "Failed get workflow by name:" << workflow_name;
return ERR_INTERNAL_FAILURE;
}
workflow->regist_metric(full_name());
......@@ -110,27 +118,19 @@ int InferService::init(const configure::InferService& conf) {
}
}
LOG(INFO)
<< "Succ load infer_service: "
<< _infer_service_format << "!";
LOG(INFO) << "Succ load infer_service: " << _infer_service_format << "!";
return ERR_OK;
}
int InferService::reload() {
return ERR_OK;
}
int InferService::reload() { return ERR_OK; }
const std::string& InferService::name() const {
return _infer_service_format;
}
const std::string& InferService::name() const { return _infer_service_format; }
// ִÿworkflow
int InferService::inference(
const google::protobuf::Message* request,
// ´®ÐÐÖ´ÐÐÿ¸öworkflow
int InferService::inference(const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os) {
TRACEPRINTF("start to inference");
// when funtion call begins, framework will reset
// thread local variables&resources automatically.
......@@ -179,29 +179,26 @@ int InferService::inference(
return ERR_OK;
}
int InferService::debug(
const google::protobuf::Message* request,
int InferService::debug(const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os) {
return inference(request, response, debug_os);
}
int InferService::execute_one_workflow(
uint32_t index,
int InferService::execute_one_workflow(uint32_t index,
const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os) {
if (index >= _flows.size()) {
LOG(ERROR) << "Faield execute workflow, index: "
<< index << " >= max:" << _flows.size();
LOG(ERROR) << "Faield execute workflow, index: " << index
<< " >= max:" << _flows.size();
return ERR_OVERFLOW_FAILURE;
}
Workflow* workflow = _flows[index];
return _execute_workflow(workflow, request, response, debug_os);
}
int InferService::_execute_workflow(
Workflow* workflow,
int InferService::_execute_workflow(Workflow* workflow,
const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os) {
......@@ -217,8 +214,7 @@ int InferService::_execute_workflow(
// call actual inference interface
int errcode = dv->execute(debug_os);
if (errcode < 0) {
LOG(ERROR) << "Failed execute dag for workflow:"
<< workflow->name();
LOG(ERROR) << "Failed execute dag for workflow:" << workflow->name();
return errcode;
}
......@@ -244,30 +240,35 @@ int InferService::_execute_workflow(
std::vector<Workflow*>* InferService::_map_request_to_workflow(
const google::protobuf::Message* request) {
const google::protobuf::Descriptor* desc = request->GetDescriptor();
const google::protobuf::FieldDescriptor* field = desc->FindFieldByName(_request_field_key);
const google::protobuf::FieldDescriptor* field =
desc->FindFieldByName(_request_field_key);
if (field == NULL) {
LOG(ERROR) << "No field[" << _request_field_key << "] in [" << desc->full_name() << "].";
LOG(ERROR) << "No field[" << _request_field_key << "] in ["
<< desc->full_name() << "].";
return NULL;
}
if (field->is_repeated()) {
LOG(ERROR) << "field[" << desc->full_name() << "."
<< _request_field_key << "] is repeated.";
LOG(ERROR) << "field[" << desc->full_name() << "." << _request_field_key
<< "] is repeated.";
return NULL;
}
if (field->cpp_type() != google::protobuf::FieldDescriptor::CPPTYPE_STRING) {
LOG(ERROR) << "field[" << desc->full_name() << "."
<< _request_field_key << "] should be string";
LOG(ERROR) << "field[" << desc->full_name() << "." << _request_field_key
<< "] should be string";
return NULL;
}
const std::string& field_value = request->GetReflection()->GetString(*request, field);
std::vector<Workflow*>* p_workflow = _request_to_workflow_map.seek(field_value);
const std::string& field_value =
request->GetReflection()->GetString(*request, field);
std::vector<Workflow*>* p_workflow =
_request_to_workflow_map.seek(field_value);
if (p_workflow == NULL) {
LOG(ERROR) << "cannot find key[" << field_value << "] in _request_to_workflow_map";
LOG(ERROR) << "cannot find key[" << field_value
<< "] in _request_to_workflow_map";
return NULL;
}
return p_workflow;
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_SERVICE_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <utility>
#include <vector>
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/merger.h"
#include "framework/workflow.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class InferService {
public:
public:
typedef OpChannel<google::protobuf::Message> BuiltinChannel;
static const char* tag() {
return "service";
}
static const char* tag() { return "service"; }
InferService() :
_last_change_timestamp(0),
InferService()
: _last_change_timestamp(0),
_enable_map_request_to_workflow(false),
_request_field_key(""),
_merger(NULL) {
......@@ -34,58 +47,52 @@ public:
const std::string& name() const;
const std::string& full_name() const {
return _infer_service_format;
}
const std::string& full_name() const { return _infer_service_format; }
// 串行执行每个workflow
virtual int inference(
const google::protobuf::Message* request,
// Execute each workflow serially
virtual int inference(const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os = NULL);
int debug(
const google::protobuf::Message* request,
int debug(const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os);
int execute_one_workflow(
uint32_t index,
int execute_one_workflow(uint32_t index,
const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os);
private:
int _execute_workflow(
Workflow* workflow,
private:
int _execute_workflow(Workflow* workflow,
const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os);
std::vector<Workflow*>* _map_request_to_workflow(const google::protobuf::Message* request);
std::vector<Workflow*>* _map_request_to_workflow(
const google::protobuf::Message* request);
private:
private:
std::vector<Workflow*> _flows;
std::string _infer_service_format;
uint64_t _last_change_timestamp;
bool _enable_map_request_to_workflow;
std::string _request_field_key;
::butil::FlatMap<std::string, std::vector<Workflow*> > _request_to_workflow_map;
::butil::FlatMap<std::string, std::vector<Workflow*>>
_request_to_workflow_map;
IMerger* _merger;
};
class ParallelInferService : public InferService {
public:
// 并行执行每个workflow
int inference(
const google::protobuf::Message* request,
public:
// Execute workflows in parallel
int inference(const google::protobuf::Message* request,
google::protobuf::Message* response,
butil::IOBufBuilder* debug_os) {
return 0;
}
};
} // predictor
} // paddle_serving
} // baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_INFERSERVICE_H
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_FORMAT_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_FORMAT_MANAGER_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <utility>
#include "common/inner_common.h"
namespace baidu {
......@@ -8,33 +22,28 @@ namespace paddle_serving {
namespace predictor {
#define REGIST_FORMAT_SERVICE(svr_name, svr) \
do { \
int ret = ::baidu::paddle_serving::predictor::FormatServiceManager::instance().regist_service(\
svr_name, svr); \
do { \
int ret = \
::baidu::paddle_serving::predictor::FormatServiceManager::instance() \
.regist_service(svr_name, svr); \
if (ret != 0) { \
LOG(ERROR) \
<< "Failed regist service[" \
<< svr_name << "]" << "[" \
<< typeid(svr).name() << "]" \
LOG(ERROR) << "Failed regist service[" << svr_name << "]" \
<< "[" << typeid(svr).name() << "]" \
<< "!"; \
} else { \
LOG(INFO) \
<< "Success regist service[" \
<< svr_name << "][" \
LOG(INFO) << "Success regist service[" << svr_name << "][" \
<< typeid(svr).name() << "]" \
<< "!"; \
} \
} while (0)
} while (0)
class FormatServiceManager {
public:
public:
typedef google::protobuf::Service Service;
int regist_service(const std::string& svr_name, Service* svr) {
if (_service_map.find(svr_name) != _service_map.end()) {
LOG(ERROR)
<< "Service[" << svr_name << "]["
<< typeid(svr).name() << "]"
LOG(ERROR) << "Service[" << svr_name << "][" << typeid(svr).name() << "]"
<< " already exist!";
return -1;
}
......@@ -42,23 +51,19 @@ public:
std::pair<boost::unordered_map<std::string, Service*>::iterator, bool> ret;
ret = _service_map.insert(std::make_pair(svr_name, svr));
if (ret.second == false) {
LOG(ERROR)
<< "Service[" << svr_name << "]["
<< typeid(svr).name() << "]"
LOG(ERROR) << "Service[" << svr_name << "][" << typeid(svr).name() << "]"
<< " insert failed!";
return -1;
}
LOG(INFO)
<< "Service[" << svr_name << "] insert successfully!";
LOG(INFO) << "Service[" << svr_name << "] insert successfully!";
return 0;
}
Service* get_service(const std::string& svr_name) {
boost::unordered_map<std::string, Service*>::iterator res;
if ((res = _service_map.find(svr_name)) == _service_map.end()) {
LOG(WARNING)
<< "Service[" << svr_name << "] "
LOG(WARNING) << "Service[" << svr_name << "] "
<< "not found in service manager"
<< "!";
return NULL;
......@@ -71,12 +76,10 @@ public:
return service_;
}
private:
private:
boost::unordered_map<std::string, Service*> _service_map;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "common/inner_common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "framework/workflow.h"
#include <string>
#include "common/inner_common.h"
#include "framework/predictor_metric.h" // PredictorMetric
namespace baidu {
......@@ -24,8 +39,7 @@ DagView* Workflow::fetch_dag_view(const std::string& service_name) {
} else if (_type == "Parallel") {
view = butil::get_object<ParallelDagView>();
} else {
LOG(ERROR)
<< "Unknown dag type:" << _type << "!";
LOG(ERROR) << "Unknown dag type:" << _type << "!";
return NULL;
}
if (view == NULL) {
......@@ -41,12 +55,10 @@ void Workflow::return_dag_view(DagView* view) {
if (_type == "Sequence") {
butil::return_object<DagView>(view);
} else if (_type == "Parallel") {
butil::return_object<ParallelDagView>(
dynamic_cast<ParallelDagView*>(view));
butil::return_object<ParallelDagView>(dynamic_cast<ParallelDagView*>(view));
} else {
LOG(ERROR)
<< "Unknown dag type:" << _type << "!";
return ;
LOG(ERROR) << "Unknown dag type:" << _type << "!";
return;
}
}
......@@ -62,6 +74,6 @@ void Workflow::regist_metric(const std::string& service_name) {
_dag.regist_metric(service_name);
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_WORKFLOW_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_WORKFLOW_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
#include "framework/dag.h"
#include "framework/dag_view.h"
......@@ -9,16 +22,14 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
template<typename T>
template <typename T>
class Manager;
class Workflow {
public:
public:
Workflow() {}
static const char* tag() {
return "workflow";
}
static const char* tag() { return "workflow"; }
// Each workflow object corresponds to an independent
// configure file, so you can share the object between
......@@ -33,24 +44,18 @@ public:
int reload();
const std::string& name() {
return _name;
}
const std::string& name() { return _name; }
const std::string& full_name() {
return _name;
}
const std::string& full_name() { return _name; }
void regist_metric(const std::string& service_name);
private:
private:
Dag _dag;
std::string _type;
std::string _name;
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mempool/mempool.h"
namespace im {
......@@ -31,7 +45,6 @@ void Region::reset() {
// clear the large buffer
_big_mem_size.store(0, butil::memory_order_relaxed);
_big_mem_count.store(0, butil::memory_order_relaxed);
}
BlockReference* Region::get() {
......@@ -43,13 +56,12 @@ BlockReference* Region::get() {
return ref;
}
void Region::put(BlockReference* block) {
_free_blocks.put(block);
}
void Region::put(BlockReference* block) { _free_blocks.put(block); }
void* Region::malloc(size_t size) {
if (size < MLC_MEM_THRESHOLD) {
uint32_t offset = _big_mem_size.fetch_add(size, butil::memory_order_relaxed);
uint32_t offset =
_big_mem_size.fetch_add(size, butil::memory_order_relaxed);
if (offset + size < _big_mem_capacity) {
_big_mem_count.fetch_add(1, butil::memory_order_relaxed);
return _big_mem_start + offset;
......@@ -58,7 +70,7 @@ void* Region::malloc(size_t size) {
_mlc_mem_size.fetch_add(size, butil::memory_order_relaxed);
_mlc_mem_count.fetch_add(1, butil::memory_order_relaxed);
BigNode* node = (BigNode*)::malloc(sizeof(BigNode) + size);
BigNode* node = reinterpret_cast<BigNode*>(::malloc(sizeof(BigNode) + size));
_big_nodes.push(node);
return node->data;
}
......@@ -73,9 +85,6 @@ Region::Region() {
_mlc_mem_size.store(0, butil::memory_order_relaxed);
_mlc_mem_count.store(0, butil::memory_order_relaxed);
}
}
}
}
} // namespace memory
} // namespace fugue
} // namespace im
#ifndef APP_ECOM_IM_MEMPOOL_SRC_MEMPOOL_H
#define APP_ECOM_IM_MEMPOOL_SRC_MEMPOOL_H
#include <execinfo.h>
#include <sstream>
#include <pthread.h>
#include <new>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <butil/atomicops.h>
#include <butil/logging.h>
#include <execinfo.h>
#include <pthread.h>
#include <iostream>
#include <new>
#include <sstream>
#include <string>
namespace im {
namespace fugue {
......@@ -16,25 +29,21 @@ namespace lockfree {
template <class T>
class PushOnlyStack {
public:
PushOnlyStack() {
_head.store(NULL, butil::memory_order_relaxed);
}
public:
PushOnlyStack() { _head.store(NULL, butil::memory_order_relaxed); }
void push(T* node) {
T* head = _head.load(butil::memory_order_relaxed);
node->next = head;
while (!_head.compare_exchange_weak(
head, node, butil::memory_order_relaxed)) {
while (
!_head.compare_exchange_weak(head, node, butil::memory_order_relaxed)) {
node->next = head;
}
}
T* release() {
return _head.exchange(NULL, butil::memory_order_relaxed);
}
T* release() { return _head.exchange(NULL, butil::memory_order_relaxed); }
private:
private:
butil::atomic<T*> _head;
};
......@@ -47,7 +56,7 @@ struct FreeListNode {
template <class T, int CAP>
class FreeList {
public:
public:
typedef FreeListNode<T> Node;
static const uint64_t EMPTY = 0xFFFFFFFFFFFFFFFF;
......@@ -129,10 +138,8 @@ public:
reset();
}
private:
uint32_t slot(uint64_t id) const {
return static_cast<uint32_t>(id);
}
private:
uint32_t slot(uint64_t id) const { return static_cast<uint32_t>(id); }
T* new_node() {
uint32_t index = _slot_index.fetch_add(1, butil::memory_order_relaxed);
......@@ -144,7 +151,7 @@ private:
return &(_node[index]->data);
}
Node* node = (Node*)malloc(sizeof(Node));
Node* node = reinterpret_cast<Node*>(malloc(sizeof(Node)));
new (node) Node;
node->id = index;
......@@ -153,20 +160,15 @@ private:
return &node->data;
}
Node* address(uint64_t id) {
return _node[slot(id)];
}
Node* address(uint64_t id) { return _node[slot(id)]; }
const Node* address(uint64_t id) const {
return _node[slot(id)];
}
const Node* address(uint64_t id) const { return _node[slot(id)]; }
butil::atomic<uint64_t> _head;
butil::atomic<uint32_t> _slot_index;
Node* _node[CAP];
};
}
} // namespace lockfree
namespace memory {
......@@ -176,7 +178,7 @@ struct Block {
};
class GlobalBlockFreeList {
public:
public:
static const int MAX_BLOCK_COUNT = 32 * 1024;
typedef lockfree::FreeList<Block, MAX_BLOCK_COUNT> type;
static type* instance() {
......@@ -200,7 +202,7 @@ struct BlockReference {
};
class Region {
public:
public:
struct GlobalPut {
void operator()(BlockReference* block_ref) {
if (block_ref->block != NULL) {
......@@ -217,7 +219,7 @@ public:
~Region() {
reset();
delete [] _big_mem_start;
delete[] _big_mem_start;
_big_mem_start = NULL;
}
......@@ -232,8 +234,10 @@ public:
std::ostringstream oss;
oss << "[alloc_blks:" << alloc_blocks << ",free_blks:" << free_blocks
<< ",used_mem_kb:" << used_mem_mb << ",big_mem_kb:" << (big_buf_size >> 10)
<< ",big_buf_cnt:" << big_buf_count << ",mlc_mem_kb:" << (mlc_mem_size >> 10)
<< ",used_mem_kb:" << used_mem_mb
<< ",big_mem_kb:" << (big_buf_size >> 10)
<< ",big_buf_cnt:" << big_buf_count
<< ",mlc_mem_kb:" << (mlc_mem_size >> 10)
<< ",mlc_cnt:" << mlc_mem_count << "]";
return oss.str().c_str();
......@@ -256,7 +260,7 @@ public:
static const int MLC_MEM_THRESHOLD = 4 * 1024 * 1024;
static const int COUNTER_SIZE = MLC_MEM_THRESHOLD / BIG_MEM_THRESHOLD + 1;
private:
private:
lockfree::FreeList<BlockReference, MAX_BLOCK_COUNT> _free_blocks;
lockfree::PushOnlyStack<BigNode> _big_nodes;
......@@ -269,13 +273,11 @@ private:
butil::atomic<uint32_t> _mlc_mem_size;
butil::atomic<uint32_t> _mlc_mem_count;
};
}
}
} // namespace memory
} // namespace fugue
class Mempool {
public:
public:
void* malloc(size_t size) {
size = _align(size);
if (size <= _free_size) {
......@@ -326,15 +328,12 @@ public:
return NULL;
}
Mempool(fugue::memory::Region* blocks) : _free_size(0)
, _free_cursor(NULL)
, _blocks(blocks) {
explicit Mempool(fugue::memory::Region* blocks)
: _free_size(0), _free_cursor(NULL), _blocks(blocks) {
_block = NULL;
}
~Mempool() {
release_block();
}
~Mempool() { release_block(); }
void release_block() {
if (_block) {
......@@ -347,7 +346,7 @@ public:
_block = NULL;
}
private:
private:
void* malloc_from_region(size_t size) {
if (size >= fugue::memory::Region::BIG_MEM_THRESHOLD) {
return _blocks->malloc(size);
......@@ -394,14 +393,14 @@ private:
extern __thread Mempool* g_mempool;
class mempool {
public:
virtual void * malloc (size_t size) = 0;
virtual void free (void *p, size_t size) = 0;
inline virtual ~mempool(){}
public:
virtual void* malloc(size_t size) = 0;
virtual void free(void* p, size_t size) = 0;
inline virtual ~mempool() {}
};
class GlobalMempool : public mempool {
public:
public:
GlobalMempool() {
// do nothing;
}
......@@ -415,35 +414,24 @@ public:
return &singleton;
}
void reset(Mempool* mempool) {
g_mempool = mempool;
}
void reset(Mempool* mempool) { g_mempool = mempool; }
void* malloc(size_t size) {
return g_mempool->malloc(size);
}
void* malloc(size_t size) { return g_mempool->malloc(size); }
void* realloc(void* old_data, size_t old_size, size_t new_size) {
return g_mempool->realloc(old_data, old_size, new_size);
}
void free(void* p, size_t s) {
g_mempool->free(p, s);
}
void clear() {
g_mempool->release_block();
}
void free(void* p, size_t s) { g_mempool->free(p, s); }
Mempool* get() {
return g_mempool;
}
void clear() { g_mempool->release_block(); }
Mempool* get() { return g_mempool; }
};
class MempoolGuard {
public:
MempoolGuard(fugue::memory::Region* region) : _mempool(region) {
public:
explicit MempoolGuard(fugue::memory::Region* region) : _mempool(region) {
acquire();
}
......@@ -457,17 +445,15 @@ public:
g_mempool = _saved_mempool;
}
~MempoolGuard() {
release();
}
~MempoolGuard() { release(); }
private:
private:
Mempool _mempool;
Mempool* _saved_mempool;
};
inline std::string print_trace() {
const static int BT_BUF_SIZE = 400;
static const int BT_BUF_SIZE = 400;
std::stringstream debug_stream;
void* buffer[BT_BUF_SIZE];
......@@ -480,6 +466,4 @@ inline std::string print_trace() {
return debug_stream.str();
}
}
#endif
} // namespace im
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/op.h"
#include <butil/time.h> // butil::Timer
#include "common/utils.h"
#include <string>
#include "common/constant.h"
#include "common/utils.h"
#include "framework/channel.h"
#include "framework/dag.h"
......@@ -9,8 +24,12 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
int Op::init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
const std::string& type, void* conf) {
int Op::init(Bus* bus,
Dag* dag,
uint32_t id,
const std::string& name,
const std::string& type,
void* conf) {
_bus = bus;
_dag = dag;
_id = id;
......@@ -20,8 +39,7 @@ int Op::init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
_timer = butil::get_object<TimerFlow>();
if (!_timer) {
LOG(ERROR) << "Invalid timerflow in op:"
<< this->name();
LOG(ERROR) << "Invalid timerflow in op:" << this->name();
return -1;
}
......@@ -31,9 +49,8 @@ int Op::init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
Channel* channel = mutable_channel();
if (channel == NULL) {
LOG(ERROR)
<< "Failed mutable channel in op: "
<< this->id() << ", " << this->name() << "!";
LOG(ERROR) << "Failed mutable channel in op: " << this->id() << ", "
<< this->name() << "!";
return -1;
}
......@@ -50,8 +67,8 @@ int Op::deinit() {
_timer = NULL;
if (release_channel() != 0) {
LOG(ERROR) << "Failed release channel in op:"
<< this->id() << ", " << this->name() << "!";
LOG(ERROR) << "Failed release channel in op:" << this->id() << ", "
<< this->name() << "!";
return -1;
}
......@@ -78,14 +95,12 @@ int Op::process(bool debug) {
_timer->start();
}
if (!_has_init) {
LOG(ERROR)
<< "Make sure op has been init before inference";
LOG(ERROR) << "Make sure op has been init before inference";
return ERR_INTERNAL_FAILURE;
}
if (_has_calc) {
LOG(INFO)
<< "Op: " << _name << " already processed before";
LOG(INFO) << "Op: " << _name << " already processed before";
return ERR_OK;
}
......@@ -151,15 +166,13 @@ bool Op::is_mutable(const std::string& op) {
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
LOG(WARNING) << "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW) {
LOG(WARNING)
<< "op: " << _name << " has no RW access"
LOG(WARNING) << "op: " << _name << " has no RW access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please use get_argment() instead.";
return false;
......@@ -172,18 +185,15 @@ bool Op::is_mutable(const std::string& op) const {
if (op == START_OP_NAME) {
return false;
}
DagNode* node = const_cast<DagNode*>(
_dag->node_by_name(_name));
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
LOG(WARNING) << "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW) {
LOG(WARNING)
<< "op: " << _name << " has no RW access"
LOG(WARNING) << "op: " << _name << " has no RW access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please use get_argment() instead.";
return false;
......@@ -198,15 +208,13 @@ bool Op::is_readable(const std::string& op) {
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on"
LOG(WARNING) << "op: " << _name << " doesnot depend on"
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW && node->depends[op] != RO) {
LOG(WARNING)
<< "op: " << _name << " has no RO access"
LOG(WARNING) << "op: " << _name << " has no RO access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please check your configuration.";
return false;
......@@ -221,15 +229,13 @@ bool Op::is_readable(const std::string& op) const {
}
DagNode* node = const_cast<DagNode*>(_dag->node_by_name(_name));
if (node->depends.find(op) == node->depends.end()) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on "
LOG(WARNING) << "op: " << _name << " doesnot depend on "
<< "op: " << op << "!";
return false;
}
if (node->depends[op] != RW && node->depends[op] != RO) {
LOG(WARNING)
<< "op: " << _name << " has no RO access"
LOG(WARNING) << "op: " << _name << " has no RO access"
<< "ot op: " << op << ", mode: " << node->depends[op]
<< ", please check your configuration.";
return false;
......@@ -238,30 +244,26 @@ bool Op::is_readable(const std::string& op) const {
return true;
}
// 获得依赖Op的Channel对象
// Get the Channel object of dependent OP
Channel* Op::mutable_depend_channel(const std::string& op) {
if (!is_mutable(op)) {
LOG(WARNING)
<< "Op: " << _name << " cannot mutable op: "
<< op << "!";
LOG(WARNING) << "Op: " << _name << " cannot mutable op: " << op << "!";
return NULL;
}
// 从bus中获取依赖op的channel
// Get the Channel object of dependent OP from bus
return _bus->channel_by_name(op);
}
// 获得依赖Op的Channel对象
// Get the Channel object of dependent OP
const Channel* Op::get_depend_channel(const std::string& op) const {
// 从dag中获取依赖op的mode属性
// Get the `mode` attribute of dependent OP from dag
if (!is_readable(op)) {
LOG(WARNING)
<< "op: " << _name << " doesnot depend on op: "
<< op << "!";
LOG(WARNING) << "op: " << _name << " doesnot depend on op: " << op << "!";
return NULL;
}
// 从bus中获取依赖op的channel
// Get the Channel object of dependent OP from bus
return _bus->channel_by_name(op);
}
......@@ -275,17 +277,11 @@ const google::protobuf::Message* Op::get_message() const {
bool Op::has_calc() { return _has_calc; }
const char* Op::name() const {
return _name.c_str();
}
const char* Op::name() const { return _name.c_str(); }
const std::string& Op::type() const {
return _type;
}
const std::string& Op::type() const { return _type; }
uint32_t Op::id() const {
return _id;
}
uint32_t Op::id() const { return _id; }
const std::string Op::debug_string() {
const Channel* channel = get_channel();
......@@ -300,6 +296,6 @@ const google::protobuf::Message* Op::get_request_message() {
return _bus->channel_by_name(START_OP_NAME)->message();
}
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <bvar/bvar.h> // bvar::LatencyRecorder
#include <string>
#include "common/inner_common.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
......@@ -14,8 +27,9 @@ namespace predictor {
class Dag;
class Op {
public:
Op() : _bus(NULL),
public:
Op()
: _bus(NULL),
_dag(NULL),
_has_calc(false),
_has_init(false),
......@@ -23,80 +37,81 @@ public:
virtual ~Op() {}
// ------对依赖OP的Channel/Data/Message数据获取接口-----
// ------Getters for Channel/Data/Message of dependent OP-----
// 获得依赖Op的Channel对象
// Get the Channel object of dependent OP
Channel* mutable_depend_channel(const std::string& op);
// 获得依赖Op的Channel对象
// Get the Channel object of dependent OP
const Channel* get_depend_channel(const std::string& op) const;
template<typename T>
template <typename T>
T* mutable_depend_argument(const std::string& op) {
Channel* channel = mutable_depend_channel(op);
if (channel == NULL) {
LOG(WARNING) << "cannot mutable channel of " << op
<< " in " << _name;
LOG(WARNING) << "cannot mutable channel of " << op << " in " << _name;
return NULL;
}
OpChannel<T>* op_channel =
dynamic_cast<OpChannel<T>*>(channel);
OpChannel<T>* op_channel = dynamic_cast<OpChannel<T>*>(channel);
if (!op_channel) {
LOG(ERROR) << "Cannot dynamic cast channel of op:"
<< this->name() << " to type: " << typeid(T).name();
LOG(ERROR) << "Cannot dynamic cast channel of op:" << this->name()
<< " to type: " << typeid(T).name();
return NULL;
}
return op_channel->data();
}
template<typename T>
template <typename T>
const T* get_depend_argument(const std::string& op) const {
const Channel* channel = get_depend_channel(op);
if (channel == NULL) {
LOG(WARNING) << "cannot get read-only channel of " << op
<< " in " << _name;
LOG(WARNING) << "cannot get read-only channel of " << op << " in "
<< _name;
return NULL;
}
const OpChannel<T>* op_channel =
dynamic_cast<const OpChannel<T>*>(channel);
const OpChannel<T>* op_channel = dynamic_cast<const OpChannel<T>*>(channel);
if (!op_channel) {
LOG(ERROR) << "Cannot dynamic cast channel of op:"
<< this->name() << " to type: " << typeid(T).name();
LOG(ERROR) << "Cannot dynamic cast channel of op:" << this->name()
<< " to type: " << typeid(T).name();
return NULL;
}
return op_channel->data();
}
// -----对当前OP的Channel/Data/Message数据获取接口----
// -----Getters for Channel/Data/Message of current OP----
// 获得该OP的Protobuf message类型指针
// Get pointer to the progobuf message of current OP
google::protobuf::Message* mutable_message();
// 获得该OP的Protobuf message类型指针
// Get pointer to the protobuf message of current OP
const google::protobuf::Message* get_message() const;
// 获得该OP的模板类数据对象
template<typename T>
// Get the template class data object of current OP
template <typename T>
T* mutable_data() {
Channel* channel = mutable_channel();
return (dynamic_cast<OpChannel<T>*>(channel))->data();
}
// 获得该OP的模板类数据对象
template<typename T>
// Get the template class data object of current OP
template <typename T>
const T* get_data() const {
const Channel* channel = get_channel();
return (dynamic_cast<const OpChannel<T>*>(channel))->data();
}
// ---------------- 其它基类成员函数 ----------------
// ---------------- Other base class members ----------------
int init(Bus* bus, Dag* dag, uint32_t id, const std::string& name,
const std::string& type, void* conf);
int init(Bus* bus,
Dag* dag,
uint32_t id,
const std::string& name,
const std::string& type,
void* conf);
int deinit();
......@@ -106,20 +121,16 @@ public:
std::string time_info();
// 获得输入对象
// Get the input object
const google::protobuf::Message* get_request_message();
bool has_calc();
const char* name() const;
const std::string& full_name() const {
return _full_name;
}
const std::string& full_name() const { return _full_name; }
void set_full_name(const std::string full_name) {
_full_name = full_name;
}
void set_full_name(const std::string full_name) { _full_name = full_name; }
const std::string& type() const;
......@@ -135,29 +146,29 @@ public:
// ------------------ OP Interface -------------------
// 获得当前Op的Channel派生类对象
// Get the derived Channel object of current OP
virtual Channel* mutable_channel() = 0;
// 获得当前Op的Channel派生类对象
// Get the derived Channel object of current OP
virtual const Channel* get_channel() const = 0;
// 释放当前Op的Channel派生类对象
// Release the derived Channel object of current OP
virtual int release_channel() = 0;
// 当前Op自定义inference函数接口
// Inference interface
virtual int inference() = 0;
// ------------------ Conf Interface -------------------
virtual void* create_config(const configure::DAGNode& conf) { return NULL; }
virtual void delete_config(void* conf) { }
virtual void delete_config(void* conf) {}
virtual void set_config(void* conf) { return; }
// ------------------ Metric Interface -------------------
virtual void regist_metric() { return; }
private:
private:
bool is_mutable(const std::string& op);
bool is_mutable(const std::string& op) const;
......@@ -166,7 +177,7 @@ private:
bool is_readable(const std::string& op) const;
private:
private:
Bus* _bus;
Dag* _dag;
uint32_t _id;
......@@ -178,9 +189,9 @@ private:
TimerFlow* _timer;
};
template<typename T>
template <typename T>
class OpWithChannel : public Op {
public:
public:
typedef T DataType;
typedef OpChannel<T> ChannelType;
......@@ -197,18 +208,14 @@ public:
_channel = butil::get_object<ChannelType>();
if (!_channel) {
LOG(ERROR)
<< "Failed mutable channel of type:"
<< typeid(T).name();
LOG(ERROR) << "Failed mutable channel of type:" << typeid(T).name();
return NULL;
}
_channel->init(this->id(), this->name());
return _channel;
}
const Channel* get_channel() const {
return _channel;
}
const Channel* get_channel() const { return _channel; }
int release_channel() {
if (_channel) {
......@@ -222,39 +229,32 @@ public:
// ------------- Interface -------------
// Op自定义inference接口
// Inference interface
virtual int inference() = 0;
private:
private:
ChannelType* _channel;
};
template<typename T, typename C>
template <typename T, typename C>
class OpWithChannelAndConf : public OpWithChannel<T> {
public:
void set_config(void* conf) {
_conf = static_cast<C*>(conf);
}
public:
void set_config(void* conf) { _conf = static_cast<C*>(conf); }
C* get_self_config() { return _conf; }
virtual void delete_config(void* conf) { delete static_cast<C*>(conf); }
private:
private:
C* _conf;
};
#define DECLARE_OP(OP_TYPE) \
OP_TYPE() { \
REGISTER_OP(OP_TYPE); \
} \
static OP_TYPE _s_##OP_TYPE \
#define DEFINE_OP(OP_TYPE) \
OP_TYPE OP_TYPE::_s_##OP_TYPE \
OP_TYPE() { REGISTER_OP(OP_TYPE); } \
static OP_TYPE _s_##OP_TYPE
} // predictor
} // paddle_serving
} // baidu
#define DEFINE_OP(OP_TYPE) OP_TYPE OP_TYPE::_s_##OP_TYPE
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SEVING_PREDICTOR_OP_STRUCT_DEMO_H
#define BAIDU_PADDLE_SEVING_PREDICTOR_OP_STRUCT_DEMO_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
......@@ -15,8 +28,7 @@ struct DemoData {
};
class StructOp : public OpWithChannel<DemoData> {
public:
public:
DECLARE_OP(StructOp);
int inference() {
......@@ -29,8 +41,6 @@ public:
DEFINE_OP(StructOp);
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
......@@ -30,10 +44,9 @@
// from google3/util/gtl/stl_util-inl.h
#ifndef GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
#define GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
#pragma once
#include <google/protobuf/stubs/common.h>
#include <string>
namespace google {
namespace protobuf {
......@@ -49,8 +62,7 @@ namespace protobuf {
// advanced, which could result in the hash function trying to deference a
// stale pointer.
template <class ForwardIterator>
void STLDeleteContainerPointers(ForwardIterator begin,
ForwardIterator end) {
void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
while (begin != end) {
ForwardIterator temp = begin;
++begin;
......@@ -96,7 +108,7 @@ inline char* string_as_array(string* str) {
// ElementDeleter (defined below), which ensures that your container's elements
// are deleted when the ElementDeleter goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
void STLDeleteElements(T* container) {
if (!container) return;
STLDeleteContainerPointers(container->begin(), container->end());
container->clear();
......@@ -107,7 +119,7 @@ void STLDeleteElements(T *container) {
// in the case it's given a NULL pointer.
template <class T>
void STLDeleteValues(T *v) {
void STLDeleteValues(T* v) {
if (!v) return;
for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
delete i->second;
......@@ -117,5 +129,3 @@ void STLDeleteValues(T *v) {
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_STL_UTIL_INL_H__
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
......@@ -30,12 +44,11 @@
// from google3/strings/strutil.h
#ifndef GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
#define GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
#pragma once
#include <google/protobuf/stubs/common.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <google/protobuf/stubs/common.h>
namespace google {
namespace protobuf {
......@@ -60,14 +73,11 @@ namespace protobuf {
// ----------------------------------------------------------------------
inline bool ascii_isalnum(char c) {
return ('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9');
}
inline bool ascii_isdigit(char c) {
return ('0' <= c && c <= '9');
}
inline bool ascii_isdigit(char c) { return ('0' <= c && c <= '9'); }
// ----------------------------------------------------------------------
// HasPrefixString()
......@@ -77,8 +87,7 @@ inline bool ascii_isdigit(char c) {
// prefix string if the prefix matches, otherwise the original
// string.
// ----------------------------------------------------------------------
inline bool HasPrefixString(const string& str,
const string& prefix) {
inline bool HasPrefixString(const string& str, const string& prefix) {
return str.size() >= prefix.size() &&
str.compare(0, prefix.size(), prefix) == 0;
}
......@@ -99,8 +108,7 @@ inline string StripPrefixString(const string& str, const string& prefix) {
// suffix string if the suffix matches, otherwise the original
// string.
// ----------------------------------------------------------------------
inline bool HasSuffixString(const string& str,
const string& suffix) {
inline bool HasSuffixString(const string& str, const string& suffix) {
return str.size() >= suffix.size() &&
str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
}
......@@ -120,7 +128,8 @@ inline string StripSuffixString(const string& str, const string& suffix) {
// Good for keeping html characters or protocol characters (\t) out
// of places where they might cause a problem.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void StripString(string* s, const char* remove,
LIBPROTOBUF_EXPORT void StripString(string* s,
const char* remove,
char replacewith);
// ----------------------------------------------------------------------
......@@ -132,7 +141,7 @@ LIBPROTOBUF_EXPORT void StripString(string* s, const char* remove,
// strings.
// ----------------------------------------------------------------------
inline void LowerString(string * s) {
inline void LowerString(string* s) {
string::iterator end = s->end();
for (string::iterator i = s->begin(); i != end; ++i) {
// tolower() changes based on locale. We don't want this!
......@@ -140,7 +149,7 @@ inline void LowerString(string * s) {
}
}
inline void UpperString(string * s) {
inline void UpperString(string* s) {
string::iterator end = s->end();
for (string::iterator i = s->begin(); i != end; ++i) {
// toupper() changes based on locale. We don't want this!
......@@ -156,8 +165,10 @@ inline void UpperString(string * s) {
// happened or not.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string StringReplace(const string& s, const string& oldsub,
const string& newsub, bool replace_all);
LIBPROTOBUF_EXPORT string StringReplace(const string& s,
const string& oldsub,
const string& newsub,
bool replace_all);
// ----------------------------------------------------------------------
// SplitStringUsing()
......@@ -165,7 +176,8 @@ LIBPROTOBUF_EXPORT string StringReplace(const string& s, const string& oldsub,
// to 'result'. If there are consecutive delimiters, this function skips
// over all of them.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full, const char* delim,
LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full,
const char* delim,
vector<string>* res);
// ----------------------------------------------------------------------
......@@ -177,10 +189,10 @@ LIBPROTOBUF_EXPORT void SplitStringUsing(const string& full, const char* delim,
// target string is cleared and overwritten.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT void JoinStrings(const vector<string>& components,
const char* delim, string* result);
const char* delim,
string* result);
inline string JoinStrings(const vector<string>& components,
const char* delim) {
inline string JoinStrings(const vector<string>& components, const char* delim) {
string result;
JoinStrings(components, delim, &result);
return result;
......@@ -218,8 +230,9 @@ inline string JoinStrings(const vector<string>& components,
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest);
LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest,
vector<string> *errors);
LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source,
char* dest,
vector<string>* errors);
// ----------------------------------------------------------------------
// UnescapeCEscapeString()
......@@ -237,8 +250,9 @@ LIBPROTOBUF_EXPORT int UnescapeCEscapeSequences(const char* source, char* dest,
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest);
LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src, string* dest,
vector<string> *errors);
LIBPROTOBUF_EXPORT int UnescapeCEscapeString(const string& src,
string* dest,
vector<string>* errors);
LIBPROTOBUF_EXPORT string UnescapeCEscapeString(const string& src);
// ----------------------------------------------------------------------
......@@ -251,8 +265,10 @@ LIBPROTOBUF_EXPORT string UnescapeCEscapeString(const string& src);
//
// Currently only \n, \r, \t, ", ', \ and !isprint() chars are escaped.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int CEscapeString(const char* src, int src_len,
char* dest, int dest_len);
LIBPROTOBUF_EXPORT int CEscapeString(const char* src,
int src_len,
char* dest,
int dest_len);
// ----------------------------------------------------------------------
// CEscape()
......@@ -281,20 +297,22 @@ LIBPROTOBUF_EXPORT string CHexEscape(const string& src);
// platforms, so using these is safer, from the point of view of
// overflow behavior, than using the standard libc functions.
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT int32 strto32_adaptor(const char *nptr, char **endptr,
LIBPROTOBUF_EXPORT int32 strto32_adaptor(const char* nptr,
char** endptr,
int base);
LIBPROTOBUF_EXPORT uint32 strtou32_adaptor(const char *nptr, char **endptr,
LIBPROTOBUF_EXPORT uint32 strtou32_adaptor(const char* nptr,
char** endptr,
int base);
inline int32 strto32(const char *nptr, char **endptr, int base) {
if (sizeof(int32) == sizeof(long))
inline int32 strto32(const char* nptr, char** endptr, int base) {
if (sizeof(int32) == sizeof(long)) // NOLINT
return strtol(nptr, endptr, base);
else
return strto32_adaptor(nptr, endptr, base);
}
inline uint32 strtou32(const char *nptr, char **endptr, int base) {
if (sizeof(uint32) == sizeof(unsigned long))
inline uint32 strtou32(const char* nptr, char** endptr, int base) {
if (sizeof(uint32) == sizeof(unsigned long)) // NOLINT
return strtoul(nptr, endptr, base);
else
return strtou32_adaptor(nptr, endptr, base);
......@@ -302,14 +320,14 @@ inline uint32 strtou32(const char *nptr, char **endptr, int base) {
// For now, long long is 64-bit on all the platforms we care about, so these
// functions can simply pass the call to strto[u]ll.
inline int64 strto64(const char *nptr, char **endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(int64) == sizeof(long long),
inline int64 strto64(const char* nptr, char** endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(int64) == sizeof(long long), // NOLINT
sizeof_int64_is_not_sizeof_long_long);
return strtoll(nptr, endptr, base);
}
inline uint64 strtou64(const char *nptr, char **endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(uint64) == sizeof(unsigned long long),
inline uint64 strtou64(const char* nptr, char** endptr, int base) {
GOOGLE_COMPILE_ASSERT(sizeof(uint64) == sizeof(unsigned long long), // NOLINT
sizeof_uint64_is_not_sizeof_long_long);
return strtoull(nptr, endptr, base);
}
......@@ -350,20 +368,20 @@ LIBPROTOBUF_EXPORT char* FastHex32ToBuffer(uint32 i, char* buffer);
// at least 22 bytes long
inline char* FastIntToBuffer(int i, char* buffer) {
return (sizeof(i) == 4 ?
FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
return (sizeof(i) == 4 ? FastInt32ToBuffer(i, buffer)
: FastInt64ToBuffer(i, buffer));
}
inline char* FastUIntToBuffer(unsigned int i, char* buffer) {
return (sizeof(i) == 4 ?
FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
return (sizeof(i) == 4 ? FastUInt32ToBuffer(i, buffer)
: FastUInt64ToBuffer(i, buffer));
}
inline char* FastLongToBuffer(long i, char* buffer) {
return (sizeof(i) == 4 ?
FastInt32ToBuffer(i, buffer) : FastInt64ToBuffer(i, buffer));
inline char* FastLongToBuffer(long i, char* buffer) { // NOLINT
return (sizeof(i) == 4 ? FastInt32ToBuffer(i, buffer)
: FastInt64ToBuffer(i, buffer));
}
inline char* FastULongToBuffer(unsigned long i, char* buffer) {
return (sizeof(i) == 4 ?
FastUInt32ToBuffer(i, buffer) : FastUInt64ToBuffer(i, buffer));
inline char* FastULongToBuffer(unsigned long i, char* buffer) { // NOLINT
return (sizeof(i) == 4 ? FastUInt32ToBuffer(i, buffer)
: FastUInt64ToBuffer(i, buffer));
}
// ----------------------------------------------------------------------
......@@ -405,10 +423,10 @@ inline char* FastUInt64ToBuffer(uint64 i, char* buffer) {
// ----------------------------------------------------------------------
LIBPROTOBUF_EXPORT string SimpleItoa(int i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned int i);
LIBPROTOBUF_EXPORT string SimpleItoa(long i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long i);
LIBPROTOBUF_EXPORT string SimpleItoa(long long i);
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long long i);
LIBPROTOBUF_EXPORT string SimpleItoa(long i); // NOLINT
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long i); // NOLINT
LIBPROTOBUF_EXPORT string SimpleItoa(long long i); // NOLINT
LIBPROTOBUF_EXPORT string SimpleItoa(unsigned long long i); // NOLINT
// ----------------------------------------------------------------------
// SimpleDtoa()
......@@ -451,7 +469,3 @@ LIBPROTOBUF_EXPORT double NoLocaleStrtod(const char* text, char** endptr);
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_STRUTIL_H__
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
......@@ -30,11 +44,11 @@
// Author: kenton@google.com (Kenton Varda)
//#include <google/protobuf/stubs/strutil.h>
// #include <google/protobuf/stubs/strutil.h>
#include "plugin/strutil.h"
#include "plugin/substitute.h"
#include "plugin/stl_util-inl.h"
#include "plugin/substitute.h"
namespace google {
namespace protobuf {
......@@ -52,36 +66,63 @@ static int CountSubstituteArgs(const SubstituteArg* const* args_array) {
return count;
}
string Substitute(
const char* format,
const SubstituteArg& arg0, const SubstituteArg& arg1,
const SubstituteArg& arg2, const SubstituteArg& arg3,
const SubstituteArg& arg4, const SubstituteArg& arg5,
const SubstituteArg& arg6, const SubstituteArg& arg7,
const SubstituteArg& arg8, const SubstituteArg& arg9) {
string Substitute(const char* format,
const SubstituteArg& arg0,
const SubstituteArg& arg1,
const SubstituteArg& arg2,
const SubstituteArg& arg3,
const SubstituteArg& arg4,
const SubstituteArg& arg5,
const SubstituteArg& arg6,
const SubstituteArg& arg7,
const SubstituteArg& arg8,
const SubstituteArg& arg9) {
string result;
SubstituteAndAppend(&result, format, arg0, arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8, arg9);
SubstituteAndAppend(&result,
format,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
arg8,
arg9);
return result;
}
void SubstituteAndAppend(
string* output, const char* format,
const SubstituteArg& arg0, const SubstituteArg& arg1,
const SubstituteArg& arg2, const SubstituteArg& arg3,
const SubstituteArg& arg4, const SubstituteArg& arg5,
const SubstituteArg& arg6, const SubstituteArg& arg7,
const SubstituteArg& arg8, const SubstituteArg& arg9) {
const SubstituteArg* const args_array[] = {
&arg0, &arg1, &arg2, &arg3, &arg4, &arg5, &arg6, &arg7, &arg8, &arg9, NULL
};
void SubstituteAndAppend(string* output,
const char* format,
const SubstituteArg& arg0,
const SubstituteArg& arg1,
const SubstituteArg& arg2,
const SubstituteArg& arg3,
const SubstituteArg& arg4,
const SubstituteArg& arg5,
const SubstituteArg& arg6,
const SubstituteArg& arg7,
const SubstituteArg& arg8,
const SubstituteArg& arg9) {
const SubstituteArg* const args_array[] = {&arg0,
&arg1,
&arg2,
&arg3,
&arg4,
&arg5,
&arg6,
&arg7,
&arg8,
&arg9,
NULL};
// Determine total size needed.
int size = 0;
for (int i = 0; format[i] != '\0'; i++) {
if (format[i] == '$') {
if (ascii_isdigit(format[i+1])) {
int index = format[i+1] - '0';
if (ascii_isdigit(format[i + 1])) {
int index = format[i + 1] - '0';
if (args_array[index]->size() == -1) {
GOOGLE_LOG(DFATAL)
<< "strings::Substitute format string invalid: asked for \"$"
......@@ -92,12 +133,11 @@ void SubstituteAndAppend(
}
size += args_array[index]->size();
++i; // Skip next char.
} else if (format[i+1] == '$') {
} else if (format[i + 1] == '$') {
++size;
++i; // Skip next char.
} else {
GOOGLE_LOG(DFATAL)
<< "Invalid strings::Substitute() format string: \""
GOOGLE_LOG(DFATAL) << "Invalid strings::Substitute() format string: \""
<< CEscape(format) << "\".";
return;
}
......@@ -114,12 +154,12 @@ void SubstituteAndAppend(
char* target = string_as_array(output) + original_size;
for (int i = 0; format[i] != '\0'; i++) {
if (format[i] == '$') {
if (ascii_isdigit(format[i+1])) {
const SubstituteArg* src = args_array[format[i+1] - '0'];
if (ascii_isdigit(format[i + 1])) {
const SubstituteArg* src = args_array[format[i + 1] - '0'];
memcpy(target, src->data(), src->size());
target += src->size();
++i; // Skip next char.
} else if (format[i+1] == '$') {
} else if (format[i + 1] == '$') {
*target++ = '$';
++i; // Skip next char.
}
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
......@@ -31,15 +45,13 @@
// Author: kenton@google.com (Kenton Varda)
// from google3/strings/substitute.h
#include <string>
#include <google/protobuf/stubs/common.h>
#include <string>
// hmmm...
//#include <google/protobuf/stubs/strutil.h>
// #include <google/protobuf/stubs/strutil.h>
#include "plugin/strutil.h"
#ifndef GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
#define GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
#pragma once
namespace google {
namespace protobuf {
namespace strings {
......@@ -88,14 +100,13 @@ namespace internal { // Implementation details.
class SubstituteArg {
public:
inline SubstituteArg(const char* value)
explicit inline SubstituteArg(const char* value)
: text_(value), size_(strlen(text_)) {}
inline SubstituteArg(const string& value)
explicit inline SubstituteArg(const string& value)
: text_(value.data()), size_(value.size()) {}
// Indicates that no argument was given.
inline explicit SubstituteArg()
: text_(NULL), size_(-1) {}
inline SubstituteArg() : text_(NULL), size_(-1) {}
// Primitives
// We don't overload for signed and unsigned char because if people are
......@@ -103,29 +114,36 @@ class SubstituteArg {
// probably actually using them as 8-bit integers and would probably
// prefer an integer representation. But, we don't really know. So, we
// make the caller decide what to do.
inline SubstituteArg(char value)
: text_(scratch_), size_(1) { scratch_[0] = value; }
inline SubstituteArg(short value)
: text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned short value)
: text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(int value)
explicit inline SubstituteArg(char value) : text_(scratch_), size_(1) {
scratch_[0] = value;
}
explicit inline SubstituteArg(short value) // NOLINT
: text_(FastInt32ToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(unsigned short value) // NOLINT
: text_(FastUInt32ToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(int value)
: text_(FastInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned int value)
explicit inline SubstituteArg(unsigned int value)
: text_(FastUInt32ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(long value)
: text_(FastLongToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned long value)
: text_(FastULongToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(long long value)
: text_(FastInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(unsigned long long value)
: text_(FastUInt64ToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(float value)
explicit inline SubstituteArg(long value) // NOLINT
: text_(FastLongToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(unsigned long value) // NOLINT
: text_(FastULongToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(long long value) // NOLINT
: text_(FastInt64ToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(unsigned long long value) // NOLINT
: text_(FastUInt64ToBuffer(value, scratch_)),
size_(strlen(text_)) {}
explicit inline SubstituteArg(float value)
: text_(FloatToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(double value)
explicit inline SubstituteArg(double value)
: text_(DoubleToBuffer(value, scratch_)), size_(strlen(text_)) {}
inline SubstituteArg(bool value)
explicit inline SubstituteArg(bool value)
: text_(value ? "true" : "false"), size_(strlen(text_)) {}
inline const char* data() const { return text_; }
......@@ -139,8 +157,8 @@ class SubstituteArg {
} // namespace internal
LIBPROTOBUF_EXPORT string Substitute(
const char* format,
LIBPROTOBUF_EXPORT string
Substitute(const char* format,
const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
......@@ -153,7 +171,8 @@ LIBPROTOBUF_EXPORT string Substitute(
const internal::SubstituteArg& arg9 = internal::SubstituteArg());
LIBPROTOBUF_EXPORT void SubstituteAndAppend(
string* output, const char* format,
string* output,
const char* format,
const internal::SubstituteArg& arg0 = internal::SubstituteArg(),
const internal::SubstituteArg& arg1 = internal::SubstituteArg(),
const internal::SubstituteArg& arg2 = internal::SubstituteArg(),
......@@ -168,5 +187,3 @@ LIBPROTOBUF_EXPORT void SubstituteAndAppend(
} // namespace strings
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_SUBSTITUTE_H_
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DenseInstance { repeated float features = 1; };
message DensePrediction {
repeated float categories = 1;
};
message DensePrediction { repeated float categories = 1; };
// sparse format
message SparseInstance {
......@@ -18,9 +28,7 @@ message SparseInstance {
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
message SparsePrediction { repeated float categories = 1; };
// int64-tensor format
message Int64TensorInstance {
......@@ -39,9 +47,7 @@ message XImageReqInstance {
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
message XImageResInstance { required string response_json = 1; };
// x-record format
message XRecordInstance {
......
//syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// syntax="proto2";
package pds.ut;
message OpMessageData {
optional int32 a = 1 [default=33];
optional float b = 2 [default=4.4];
optional int32 a = 1 [ default = 33 ];
optional float b = 2 [ default = 4.4 ];
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
optional bool pack_on = 70000 [ default = false ];
};
extend google.protobuf.ServiceOptions {
......@@ -11,6 +25,6 @@ extend google.protobuf.ServiceOptions {
};
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
optional bool generate_impl = 1 [ default = false ];
optional bool generate_stub = 2 [ default = false ];
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package aialgs.data;
message Float32Tensor {
......
#include <boost/algorithm/string.hpp>
#include <boost/scoped_ptr.hpp>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <list>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/compiler/plugin.h>
#include <google/protobuf/compiler/code_generator.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include "boost/algorithm/string.hpp"
#include "boost/scoped_ptr.hpp"
#include "google/protobuf/compiler/code_generator.h"
#include "google/protobuf/compiler/plugin.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/io/printer.h"
#include "google/protobuf/io/zero_copy_stream.h"
#include "plugin/strutil.h"
#include "plugin/substitute.h"
#include "pds_option.pb.h"
#include "predictor/pds_option.pb.h"
using std::string;
using google::protobuf::Descriptor;
using google::protobuf::FileDescriptor;
......@@ -33,8 +47,8 @@ string full_class_name(const Descriptor* descriptor) {
}
return outer->full_name();
}
}
}
} // namespace protobuf
} // namespace google
string strip_proto(const string& filename) {
if (HasSuffixString(filename, ".protolevel")) {
return StripSuffixString(filename, ".protolevel");
......@@ -42,7 +56,7 @@ string strip_proto(const string& filename) {
return StripSuffixString(filename, ".proto");
}
}
void string_format(std::string& source) {
void string_format(std::string& source) { // NOLINT
size_t len = source.length();
std::string sep = "_";
for (int i = 0; i < len; i++) {
......@@ -70,9 +84,8 @@ bool valid_service_method(const std::vector<const MethodDescriptor*>& methods) {
return false;
}
class PdsCodeGenerator : public CodeGenerator {
public:
virtual bool Generate(
const FileDescriptor* file,
public:
virtual bool Generate(const FileDescriptor* file,
const string& parameter,
GeneratorContext* context,
std::string* error) const {
......@@ -85,8 +98,8 @@ public:
*error = "get descriptor failed";
return false;
}
pds::PaddleServiceOption options
= descriptor->options().GetExtension(pds::options);
pds::PaddleServiceOption options =
descriptor->options().GetExtension(pds::options);
bool generate_impl = options.generate_impl();
bool generate_stub = options.generate_stub();
if (!generate_impl && !generate_stub) {
......@@ -117,11 +130,11 @@ public:
if (generate_impl) {
// service scope
// namespace scope
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(header, "namespace_scope"));
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream> output(
context->OpenForInsert(header, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_head(&printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_head(
&printer, descriptor, error, service_name, class_name)) {
return false;
}
}
......@@ -133,38 +146,35 @@ public:
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(header, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_stub_head(&printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_stub_head(
&printer, descriptor, error, service_name, class_name)) {
return false;
}
}
}
}
// xxx.pb.cc
{
if (generate_impl) {
// service scope
// namespace scope
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(body, "namespace_scope"));
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream> output(
context->OpenForInsert(body, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_body(&printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_body(
&printer, descriptor, error, service_name, class_name)) {
return false;
}
}
if (generate_stub) {
// service class scope
{
}
// namespace scope
{} // namespace scope
{
boost::scoped_ptr<google::protobuf::io::ZeroCopyOutputStream>
output(context->OpenForInsert(body, "namespace_scope"));
google::protobuf::io::Printer printer(output.get(), '$');
if (!generate_paddle_serving_stub_body(&printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_stub_body(
&printer, descriptor, error, service_name, class_name)) {
return false;
}
}
......@@ -173,11 +183,11 @@ public:
}
return true;
}
private:
bool generate_paddle_serving_head(
google::protobuf::io::Printer* printer,
private:
bool generate_paddle_serving_head(google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
......@@ -201,7 +211,10 @@ private:
" REGIST_FORMAT_SERVICE(\n"
" service_name, &$name$Impl::instance());\n"
" }\n\n",
"name", class_name, "variable_name", variable_name);
"name",
class_name,
"variable_name",
variable_name);
for (int i = 0; i < methods.size(); i++) {
const MethodDescriptor* m = methods[i];
printer->Print(
......@@ -209,19 +222,25 @@ private:
" const $input_name$* request,\n"
" $output_name$* response,\n"
" google::protobuf::Closure* done);\n\n",
"name", m->name(),
"input_name", google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name", google::protobuf::dots_to_colons(m->output_type()->full_name()));
"name",
m->name(),
"input_name",
google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name",
google::protobuf::dots_to_colons(m->output_type()->full_name()));
}
printer->Print(
" static $name$Impl _s_$variable_name$_impl;\n"
"};", "name", class_name, "variable_name", variable_name);
"};",
"name",
class_name,
"variable_name",
variable_name);
return true;
}
bool generate_paddle_serving_body(
google::protobuf::io::Printer* printer,
bool generate_paddle_serving_body(google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
......@@ -236,9 +255,11 @@ private:
string_format(variable_name);
for (int i = 0; i < methods.size(); i++) {
const MethodDescriptor* m = methods[i];
printer->Print(
"void $name$Impl::$method$(\n",
"name", class_name, "method", m->name());
printer->Print("void $name$Impl::$method$(\n",
"name",
class_name,
"method",
m->name());
printer->Print(
" google::protobuf::RpcController* cntl_base,\n"
" const $input_name$* request,\n"
......@@ -247,36 +268,46 @@ private:
" struct timeval tv;\n"
" gettimeofday(&tv, NULL);"
" long start = tv.tv_sec * 1000000 + tv.tv_usec;",
"input_name", google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name", google::protobuf::dots_to_colons(m->output_type()->full_name()));
"input_name",
google::protobuf::dots_to_colons(m->input_type()->full_name()),
"output_name",
google::protobuf::dots_to_colons(m->output_type()->full_name()));
if (m->name() == "inference") {
printer->Print(
" brpc::ClosureGuard done_guard(done);\n"
" brpc::Controller* cntl = \n"
" static_cast<brpc::Controller*>(cntl_base);\n"
" ::baidu::paddle_serving::predictor::InferService* svr = \n"
" ::baidu::paddle_serving::predictor::InferServiceManager::instance().item(\"$service$\");\n"
" "
"::baidu::paddle_serving::predictor::InferServiceManager::instance("
").item(\"$service$\");\n"
" if (svr == NULL) {\n"
" LOG(ERROR) << \"Not found service: $service$\";\n"
" cntl->SetFailed(404, \"Not found service: $service$\");\n"
" return ;\n"
" }\n"
" LOG(INFO) << \" remote_side=\[\" << cntl->remote_side() << \"\]\";\n"
" LOG(INFO) << \" local_side=\[\" << cntl->local_side() << \"\]\";\n"
" LOG(INFO) << \" service_name=\[\" << \"$name$\" << \"\]\";\n"
" LOG(INFO) << \" log_id=\[\" << cntl->log_id() << \"\]\";\n"
" LOG(INFO) << \" remote_side=\[\" << cntl->remote_side() << " // NOLINT
"\"\]\";\n"
" LOG(INFO) << \" local_side=\[\" << cntl->local_side() << " // NOLINT
"\"\]\";\n"
" LOG(INFO) << \" service_name=\[\" << \"$name$\" << \"\]\";\n" // NOLINT
" LOG(INFO) << \" log_id=\[\" << cntl->log_id() << \"\]\";\n" // NOLINT
" int err_code = svr->inference(request, response);\n"
" if (err_code != 0) {\n"
" LOG(WARNING)\n"
" << \"Failed call inferservice[$name$], name[$service$]\"\n"
" << \", error_code: \" << err_code;\n"
" cntl->SetFailed(err_code, \"InferService inference failed!\");\n"
" cntl->SetFailed(err_code, \"InferService inference "
"failed!\");\n"
" }\n"
" gettimeofday(&tv, NULL);\n"
" long end = tv.tv_sec * 1000000 + tv.tv_usec;\n"
" // flush notice log\n"
" LOG(INFO) << \" tc=\[\" << (end - start) << \"\]\";\n",
"name", class_name, "service", service_name);
" LOG(INFO) << \" tc=\[\" << (end - start) << \"\]\";\n", // NOLINT
"name",
class_name,
"service",
service_name);
}
if (m->name() == "debug") {
printer->Print(
......@@ -284,48 +315,59 @@ private:
" brpc::Controller* cntl = \n"
" static_cast<brpc::Controller*>(cntl_base);\n"
" ::baidu::paddle_serving::predictor::InferService* svr = \n"
" ::baidu::paddle_serving::predictor::InferServiceManager::instance().item(\"$service$\");\n"
" "
"::baidu::paddle_serving::predictor::InferServiceManager::instance("
").item(\"$service$\");\n"
" if (svr == NULL) {\n"
" LOG(ERROR) << \"Not found service: $service$\";\n"
" cntl->SetFailed(404, \"Not found service: $service$\");\n"
" return ;\n"
" }\n"
" LOG(INFO) << \" remote_side=\[\" << cntl->remote_side() << \"\]\";\n"
" LOG(INFO) << \" local_side=\[\" << cntl->local_side() << \"\]\";\n"
" LOG(INFO) << \" service_name=\[\" << \"$name$\" << \"\]\";\n"
" LOG(INFO) << \" log_id=\[\" << cntl->log_id() << \"\]\";\n"
" LOG(INFO) << \" remote_side=\[\" << cntl->remote_side() << " // NOLINT
"\"\]\";\n"
" LOG(INFO) << \" local_side=\[\" << cntl->local_side() << " // NOLINT
"\"\]\";\n"
" LOG(INFO) << \" service_name=\[\" << \"$name$\" << \"\]\";\n" // NOLINT
" LOG(INFO) << \" log_id=\[\" << cntl->log_id() << \"\]\";\n" // NOLINT
" butil::IOBufBuilder debug_os;\n"
" int err_code = svr->inference(request, response, &debug_os);\n"
" if (err_code != 0) {\n"
" LOG(WARNING)\n"
" << \"Failed call inferservice[$name$], name[$service$]\"\n"
" << \", error_code: \" << err_code;\n"
" cntl->SetFailed(err_code, \"InferService inference failed!\");\n"
" cntl->SetFailed(err_code, \"InferService inference "
"failed!\");\n"
" }\n"
" debug_os.move_to(cntl->response_attachment());\n"
" gettimeofday(&tv, NULL);\n"
" long end = tv.tv_sec * 1000000 + tv.tv_usec;\n"
" // flush notice log\n"
" LOG(INFO) << \" tc=\[\" << (end - start) << \"\]\";\n"
" LOG(INFO) << \" tc=\[\" << (end - start) << \"\]\";\n" // NOLINT
" LOG(INFO)\n"
" << \"TC=[\" << (end - start) << \"] Received debug request[log_id=\" << cntl->log_id()\n"
" << \"TC=[\" << (end - start) << \"] Received debug "
"request[log_id=\" << cntl->log_id()\n"
" << \"] from \" << cntl->remote_side()\n"
" << \" to \" << cntl->local_side();\n",
"name", class_name, "service", service_name);
"name",
class_name,
"service",
service_name);
}
printer->Print("}\n");
}
printer->Print(
"$name$Impl $name$Impl::_s_$variable_name$_impl(\"$service$\");\n",
"name", class_name,
"variable_name", variable_name,
"service", service_name);
"name",
class_name,
"variable_name",
variable_name,
"service",
service_name);
return true;
}
bool generate_paddle_serving_stub_head(
google::protobuf::io::Printer* printer,
bool generate_paddle_serving_stub_head(google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
......@@ -333,13 +375,18 @@ private:
"private:\n"
" uint32_t _package_size;\n"
" baidu::paddle_serving::sdk_cpp::Stub* _stub_handler;\n"
"public:\n", "name", class_name);
"public:\n",
"name",
class_name);
printer->Indent();
printer->Print(
"$name$_StubCallMapper(uint32_t package_size, baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
"$name$_StubCallMapper(uint32_t package_size, "
"baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
" _package_size = package_size;\n"
" _stub_handler = stub;\n"
"}\n", "name", class_name);
"}\n",
"name",
class_name);
printer->Print(
"brpc::SubCall default_map(\n"
......@@ -347,18 +394,19 @@ private:
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"default_map\", channel_index);",
"name", class_name);
" baidu::paddle_serving::sdk_cpp::TracePackScope "
"scope(\"default_map\", channel_index);",
"name",
class_name);
printer->Indent();
if (!generate_paddle_serving_stub_default_map(printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_stub_default_map(
printer, descriptor, error, service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print("}\n");
printer->Print(
"brpc::SubCall sub_package_map(\n"
......@@ -366,8 +414,10 @@ private:
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"sub_map\", channel_index);",
"name", class_name);
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"sub_map\", "
"channel_index);",
"name",
class_name);
printer->Indent();
std::vector<const FieldDescriptor*> in_shared_fields;
......@@ -395,13 +445,17 @@ private:
}
}
if (!generate_paddle_serving_stub_package_map(printer, descriptor, error,
service_name, class_name, in_shared_fields, in_item_fields)) {
if (!generate_paddle_serving_stub_package_map(printer,
descriptor,
error,
service_name,
class_name,
in_shared_fields,
in_item_fields)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print("}\n");
printer->Print(
"brpc::SubCall Map(\n"
......@@ -409,7 +463,8 @@ private:
" const google::protobuf::MethodDescriptor* method,\n"
" const google::protobuf::Message* request,\n"
" google::protobuf::Message* response) {\n",
"name", class_name);
"name",
class_name);
printer->Indent();
if (in_item_fields.size() <= 0) {
......@@ -423,7 +478,8 @@ private:
"if (_package_size == 0) {\n"
" ret = default_map(channel_index, method, request, response);\n"
"} else {\n"
" ret = sub_package_map(channel_index, method, request, response);\n"
" ret = sub_package_map(channel_index, method, request, "
"response);\n"
"}\n"
"tt.stop();\n"
"if (ret.flags != brpc::SKIP_SUB_CHANNEL && ret.method != NULL) {\n"
......@@ -444,49 +500,57 @@ private:
"private:\n"
" uint32_t _package_size;\n"
" baidu::paddle_serving::sdk_cpp::Stub* _stub_handler;\n"
"public:\n", "name", class_name);
"public:\n",
"name",
class_name);
printer->Indent();
printer->Print(
"$name$_StubResponseMerger(uint32_t package_size, baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
"$name$_StubResponseMerger(uint32_t package_size, "
"baidu::paddle_serving::sdk_cpp::Stub* stub) {\n"
" _package_size = package_size;\n"
" _stub_handler = stub;\n"
"}\n", "name", class_name);
"}\n",
"name",
class_name);
printer->Print(
"brpc::ResponseMerger::Result default_merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"default_merge\");",
"name", class_name);
" baidu::paddle_serving::sdk_cpp::TracePackScope "
"scope(\"default_merge\");",
"name",
class_name);
printer->Indent();
if (!generate_paddle_serving_stub_default_merger(printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_stub_default_merger(
printer, descriptor, error, service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print("}\n");
printer->Print(
"brpc::ResponseMerger::Result sub_package_merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n"
" baidu::paddle_serving::sdk_cpp::TracePackScope scope(\"sub_merge\");",
"name", class_name);
" baidu::paddle_serving::sdk_cpp::TracePackScope "
"scope(\"sub_merge\");",
"name",
class_name);
printer->Indent();
if (!generate_paddle_serving_stub_package_merger(printer, descriptor, error,
service_name, class_name)) {
if (!generate_paddle_serving_stub_package_merger(
printer, descriptor, error, service_name, class_name)) {
return false;
}
printer->Outdent();
printer->Print(
"}\n");
printer->Print("}\n");
printer->Print(
"brpc::ResponseMerger::Result Merge(\n"
" google::protobuf::Message* response,\n"
" const google::protobuf::Message* sub_response) {\n",
"name", class_name);
"name",
class_name);
printer->Indent();
printer->Print(
"butil::Timer tt(butil::Timer::STARTED);\n"
......@@ -511,7 +575,7 @@ private:
bool generate_paddle_serving_stub_default_map(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
......@@ -519,7 +583,8 @@ private:
" return brpc::SubCall::Skip();\n"
"}\n");
printer->Print(
"google::protobuf::Message* cur_res = _stub_handler->fetch_response();\n"
"google::protobuf::Message* cur_res = "
"_stub_handler->fetch_response();\n"
"if (cur_res == NULL) {\n"
" LOG(INFO) << \"Failed fetch response from stub handler, new it\";\n"
" cur_res = response->New();\n"
......@@ -528,19 +593,18 @@ private:
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return brpc::SubCall::Bad();\n"
" }\n"
" return brpc::SubCall(method, request, cur_res, brpc::DELETE_RESPONSE);\n"
" return brpc::SubCall(method, request, cur_res, "
"brpc::DELETE_RESPONSE);\n"
"}\n");
"LOG(INFO) \n"
" << \"[default] Succ map, channel_index: \" << channel_index;\n";
printer->Print(
"return brpc::SubCall(method, request, cur_res, 0);\n"
);
printer->Print("return brpc::SubCall(method, request, cur_res, 0);\n");
return true;
}
bool generate_paddle_serving_stub_default_merger(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
printer->Print(
......@@ -557,11 +621,11 @@ private:
bool generate_paddle_serving_stub_package_map(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name,
std::vector<const FieldDescriptor*>& in_shared_fields,
std::vector<const FieldDescriptor*>& in_item_fields) const {
std::vector<const FieldDescriptor*>& in_shared_fields, // NOLINT
std::vector<const FieldDescriptor*>& in_item_fields) const { // NOLINT
const MethodDescriptor* md = descriptor->FindMethodByName("inference");
if (!md) {
*error = "not found inference method!";
......@@ -572,10 +636,8 @@ private:
"const $req_type$* req \n"
" = dynamic_cast<const $req_type$*>(request);\n"
"$req_type$* sub_req = NULL;",
"req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
"req_type",
google::protobuf::dots_to_colons(md->input_type()->full_name()));
// 1. pack fields 逐字段计算index范围,并从req copy值sub_req
printer->Print("\n// 1. 样本字段(必须为repeated类型)按指定下标复制\n");
......@@ -588,7 +650,9 @@ private:
"uint32_t total_size = req->$field_name$_size();\n"
"if (channel_index == 0) {\n"
" _stub_handler->update_average(total_size, \"item_size\");\n"
"}\n", "field_name", field_name);
"}\n",
"field_name",
field_name);
printer->Print(
"int start = _package_size * channel_index;\n"
......@@ -601,36 +665,44 @@ private:
"}\n");
printer->Print(
"sub_req = dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
"sub_req = "
"dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
"if (sub_req == NULL) {\n"
" LOG(ERROR) << \"failed fetch sub_req from stub.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return brpc::SubCall::Bad();\n"
"}\n",
"name", class_name, "req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
"name",
class_name,
"req_type",
google::protobuf::dots_to_colons(md->input_type()->full_name()));
} else {
printer->Print(
"if (req->$field_name$_size() != total_size) {\n"
" LOG(ERROR) << \"pack field size not consistency: \"\n"
" << total_size << \"!=\" << req->$field_name$_size()\n"
" << total_size << \"!=\" << "
"req->$field_name$_size()\n"
" << \", field: $field_name$.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return brpc::SubCall::Bad();\n"
"}\n", "field_name", field_name);
"}\n",
"field_name",
field_name);
}
printer->Print("for (uint32_t i = start; i < end; ++i) {\n");
printer->Indent();
if (fd->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) {
if (fd->cpp_type() ==
google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE) {
printer->Print(
"sub_req->add_$field_name$()->CopyFrom(req->$field_name$(i));\n",
"field_name", field_name);
"field_name",
field_name);
} else {
printer->Print(
"sub_req->add_$field_name$(req->$field_name$(i));\n",
"field_name", field_name);
printer->Print("sub_req->add_$field_name$(req->$field_name$(i));\n",
"field_name",
field_name);
}
printer->Outdent();
printer->Print("}\n");
......@@ -641,14 +713,16 @@ private:
if (in_item_fields.size() == 0) {
printer->Print(
"if (sub_req == NULL) { // no packed items\n"
" sub_req = dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
" sub_req = "
"dynamic_cast<$req_type$*>(_stub_handler->fetch_request());\n"
" if (!sub_req) {\n"
" LOG(ERROR) << \"failed fetch sub_req from stub handler.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
" return brpc::SubCall::Bad();\n"
" }\n"
"}\n", "req_type", google::protobuf::dots_to_colons(
md->input_type()->full_name()));
"}\n",
"req_type",
google::protobuf::dots_to_colons(md->input_type()->full_name()));
}
for (uint32_t si = 0; si < in_shared_fields.size(); si++) {
const FieldDescriptor* fd = in_shared_fields[si];
......@@ -659,14 +733,17 @@ private:
"if (req->has_$field_name$()) {\n", "field_name", field_name);
printer->Indent();
}
if (fd->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE || fd->is_repeated()) {
if (fd->cpp_type() ==
google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE ||
fd->is_repeated()) {
printer->Print(
"sub_req->mutable_$field_name$()->CopyFrom(req->$field_name$());\n",
"field_name", field_name);
"field_name",
field_name);
} else {
printer->Print(
"sub_req->set_$field_name$(req->$field_name$());\n",
"field_name", field_name);
printer->Print("sub_req->set_$field_name$(req->$field_name$());\n",
"field_name",
field_name);
}
if (fd->is_optional()) {
printer->Outdent();
......@@ -679,7 +756,8 @@ private:
" << \"[pack] Succ map req at: \"\n"
" << channel_index;\n");
printer->Print(
"google::protobuf::Message* sub_res = _stub_handler->fetch_response();\n"
"google::protobuf::Message* sub_res = "
"_stub_handler->fetch_response();\n"
"if (sub_res == NULL) {\n"
" LOG(ERROR) << \"failed create sub_res from res.\";\n"
" _stub_handler->update_average(1, \"pack_fail\");\n"
......@@ -691,16 +769,15 @@ private:
bool generate_paddle_serving_stub_package_merger(
google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
return generate_paddle_serving_stub_default_merger(
printer, descriptor, error, service_name, class_name);
}
bool generate_paddle_serving_stub_body(
google::protobuf::io::Printer* printer,
bool generate_paddle_serving_stub_body(google::protobuf::io::Printer* printer,
const ServiceDescriptor* descriptor,
string *error,
string* error,
const std::string& service_name,
const std::string& class_name) const {
std::vector<const MethodDescriptor*> methods;
......@@ -715,8 +792,10 @@ private:
const MethodDescriptor* md = methods[0];
std::map<string, string> variables;
variables["name"] = class_name;
variables["req_type"] = google::protobuf::dots_to_colons(md->input_type()->full_name());
variables["res_type"] = google::protobuf::dots_to_colons(md->output_type()->full_name());
variables["req_type"] =
google::protobuf::dots_to_colons(md->input_type()->full_name());
variables["res_type"] =
google::protobuf::dots_to_colons(md->output_type()->full_name());
variables["fullname"] = descriptor->full_name();
printer->Print(variables,
"REGIST_STUB_OBJECT_WITH_TAG(\n"
......@@ -733,4 +812,4 @@ private:
int main(int argc, char** argv) {
PdsCodeGenerator generator;
return google::protobuf::compiler::PluginMain(argc, argv, &generator);
};
}
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include <fstream>
#include <bthread/unstable.h> // bthread_set_worker_startfn
#include <fstream>
#include <iostream>
#include "butil/logging.h"
#include "common/constant.h"
#include "common/inner_common.h"
#include "framework/workflow.h"
#include "framework/service.h"
#include "framework/manager.h"
#include "framework/server.h"
#include "butil/logging.h"
#include "framework/resource.h"
#include "common/constant.h"
#include "framework/server.h"
#include "framework/service.h"
#include "framework/workflow.h"
using baidu::paddle_serving::predictor::ServerManager;
using baidu::paddle_serving::predictor::WorkflowManager;
......@@ -50,13 +64,13 @@ DEFINE_bool(V, false, "print version, bool");
DEFINE_bool(g, false, "user defined gflag path");
DECLARE_string(flagfile);
void pthread_worker_start_fn() {
Resource::instance().thread_initialize();
}
void pthread_worker_start_fn() { Resource::instance().thread_initialize(); }
static void g_change_server_port() {
InferServiceConf conf;
if (read_proto_conf(FLAGS_inferservice_path.c_str(), FLAGS_inferservice_file.c_str(), &conf) != 0) {
if (read_proto_conf(FLAGS_inferservice_path.c_str(),
FLAGS_inferservice_file.c_str(),
&conf) != 0) {
LOG(WARNING) << "failed to load configure[" << FLAGS_inferservice_path
<< "," << FLAGS_inferservice_file << "].";
return;
......@@ -65,7 +79,8 @@ static void g_change_server_port() {
if (port != 0) {
FLAGS_port = port;
LOG(INFO) << "use configure[" << FLAGS_inferservice_path << "/"
<< FLAGS_inferservice_file << "] port[" << port << "] instead of flags";
<< FLAGS_inferservice_file << "] port[" << port
<< "] instead of flags";
}
return;
}
......@@ -109,17 +124,17 @@ int main(int argc, char** argv) {
LOG(INFO) << "Succ initialize logger";
// initialize resource manager
if (Resource::instance().initialize(
FLAGS_resource_path, FLAGS_resource_file) != 0) {
LOG(ERROR) << "Failed initialize resource, conf:"
<< FLAGS_resource_path << "/" << FLAGS_resource_file;
if (Resource::instance().initialize(FLAGS_resource_path,
FLAGS_resource_file) != 0) {
LOG(ERROR) << "Failed initialize resource, conf:" << FLAGS_resource_path
<< "/" << FLAGS_resource_file;
return -1;
}
LOG(INFO) << "Succ initialize resource";
// initialize workflow manager
if (WorkflowManager::instance().initialize(
FLAGS_workflow_path, FLAGS_workflow_file) != 0) {
if (WorkflowManager::instance().initialize(FLAGS_workflow_path,
FLAGS_workflow_file) != 0) {
LOG(ERROR) << "Failed initialize workflow manager, conf:"
<< FLAGS_workflow_path << "/" << FLAGS_workflow_file;
return -1;
......@@ -129,8 +144,7 @@ int main(int argc, char** argv) {
// initialize service manager
if (InferServiceManager::instance().initialize(
FLAGS_inferservice_path, FLAGS_inferservice_file) != 0) {
LOG(ERROR)
<< "Failed initialize infer service manager, conf:"
LOG(ERROR) << "Failed initialize infer service manager, conf:"
<< FLAGS_inferservice_path << "/" << FLAGS_inferservice_file;
return -1;
}
......@@ -138,7 +152,8 @@ int main(int argc, char** argv) {
int errcode = bthread_set_worker_startfn(pthread_worker_start_fn);
if (errcode != 0) {
LOG(ERROR) << "Failed call pthread worker start function, error_code[" << errcode << "]";
LOG(ERROR) << "Failed call pthread worker start function, error_code["
<< errcode << "]";
return -1;
}
LOG(INFO) << "Succ call pthread worker start function";
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file test_bsf.cpp
* @author root(com@baidu.com)
* @date 2018/09/20 13:54:52
* @brief
*
**/
#include "test_bsf.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "predictor/unittest/test_bsf.h"
#include <vector>
namespace baidu {
namespace paddle_serving {
......@@ -28,7 +29,8 @@ void TestItem::auto_gen() {
printf("id:%d,text:%s\n", id, text.c_str());
}
void work(const std::vector<TestItem>& in, std::vector<TestItem>& out) {
void work(const std::vector<TestItem>& in,
std::vector<TestItem>& out) { // NOLINT
for (size_t i = 0; i < in.size(); ++i) {
out[i] = in[i];
usleep(50);
......@@ -38,9 +40,12 @@ void work(const std::vector<TestItem>& in, std::vector<TestItem>& out) {
TEST_F(TestBsf, test_single_thread) {
// initialize TaskExecutor
global_id.store(0, butil::memory_order_relaxed);
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_thread_callback_fn(
boost::bind(&work, _1, _2));
EXPECT_EQ((im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->start(1)), 0);
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()
->set_thread_callback_fn(boost::bind(&work, _1, _2));
EXPECT_EQ(
(im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()
->start(1)),
0);
std::vector<TestItem> in;
std::vector<TestItem> out;
......@@ -62,19 +67,24 @@ TEST_F(TestBsf, test_single_thread) {
EXPECT_STREQ(temp, out[i].text.c_str());
}
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->stop();
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()->stop();
}
TEST_F(TestBsf, test_multi_thread) {
// initialize TaskExecutor
global_id.store(0, butil::memory_order_relaxed);
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_thread_callback_fn(
boost::bind(&work, _1, _2));
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->set_batch_size(100);
EXPECT_EQ((im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->start(3)), 0);
size_t psize = 5;
pthread_t pid[psize];
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()
->set_thread_callback_fn(boost::bind(&work, _1, _2));
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()
->set_batch_size(100);
EXPECT_EQ(
(im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()
->start(3)),
0);
const size_t psize = 5;
std::unique_ptr<pthread_t*> pid;
pid.reset(new pthread_t[psize]);
for (size_t i = 0; i < psize; ++i) {
pthread_create(&pid[i], NULL, &TestBsf::task_trigger, NULL);
}
......@@ -83,11 +93,10 @@ TEST_F(TestBsf, test_multi_thread) {
pthread_join(pid[i], NULL);
}
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem> >::instance()->stop();
}
}
}
im::bsf::TaskExecutor<im::bsf::Task<TestItem, TestItem>>::instance()->stop();
}
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file test_bsf.h
* @author root(com@baidu.com)
* @date 2018/09/20 13:53:01
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_BSF_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_BSF_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include "common/inner_common.h"
#include "framework/bsf.h"
......@@ -29,14 +29,15 @@ namespace unittest {
void TearDown() {}
struct TestItem {
void auto_gen();
bool operator==(const TestItem& other) {
return text == other.text && id == other.id;
}
static void create(std::vector<TestItem>& in, std::vector<TestItem>& out, size_t size) {
static void create(std::vector<TestItem>& in, // NOLINT
std::vector<TestItem>& out, // NOLINT
size_t size) {
in.clear();
out.clear();
for (size_t i = 0; i < size; i++) {
......@@ -53,7 +54,7 @@ struct TestItem {
};
class TestBsf : public ::testing::Test {
public:
public:
TestBsf() {}
virtual ~TestBsf() {}
......@@ -62,7 +63,7 @@ public:
std::vector<TestItem> in;
std::vector<TestItem> out;
size_t count = rand() % 10 + 1;
size_t count = rand_r() % 10 + 1;
TestItem::create(in, out, count);
im::bsf::TaskManager<TestItem, TestItem> task_manager;
......@@ -86,11 +87,6 @@ public:
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif //BAIDU_PADDLE_SERVING_PREDICTOR_TEST_FTL_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#include "common/inner_common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/inner_common.h"
int main(int argc, char** argv) {
LOG(INFO) << "Start running all ut cases...";
......
#include "test_tool.h"
#include "test_manager.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "predictor/unittest/test_manager.h"
#include "framework/manager.h"
#include "framework/service.h"
#include "predictor/unittest/test_tool.h"
namespace baidu {
namespace paddle_serving {
......@@ -17,13 +31,9 @@ struct ManagerItem {
int a;
float b;
int init(const comcfg::ConfigUnit& c) {
return 0;
}
int init(const comcfg::ConfigUnit& c) { return 0; }
static const char* tag() {
return "Item";
}
static const char* tag() { return "Item"; }
};
TEST_F(TestManager, test_manager_instance) {
......@@ -48,12 +58,12 @@ TEST_F(TestManager, test_infer_service_create) {
TEST_F(TestManager, test_conf_success) {
const char* conf_content =
"[@Item]\n\
name: item1\n\
a:b\n\
[@Item]\n\
name: item2\n\
c:d";
"[@Item]\n"
"name: item1\n"
"a:b\n"
"[@Item]\n"
"name: item2\n"
"c:d";
AutoTempFile file(conf_content);
......@@ -71,12 +81,12 @@ TEST_F(TestManager, test_conf_success) {
TEST_F(TestManager, test_conf_success_item_not_found) {
const char* conf_content =
"[@Item1]\n\
name: item1\n\
a:b\n\
[@Item2]\n\
name: item2\n\
c:d";
"[@Item1]\n"
"name: item1\n"
"a:b\n"
"[@Item2]\n"
"name: item2\n"
"c:d";
AutoTempFile file(conf_content);
......@@ -86,19 +96,18 @@ TEST_F(TestManager, test_conf_success_item_not_found) {
TEST_F(TestManager, test_conf_failed_name_not_found) {
const char* conf_content =
"[@Item]\n\
name2: item1\n\
a:b\n\
[@Item]\n\
name: item2\n\
c:d";
"[@Item]\n"
"name2: item1\n"
"a:b\n"
"[@Item]\n"
"name: item2\n"
"c:d";
AutoTempFile file(conf_content);
typedef Manager<ManagerItem> mgr;
EXPECT_EQ(mgr::instance().initialize("./", file.name()), -1);
}
}
}
}
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MANAGER_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <gtest/gtest.h>
namespace baidu {
......@@ -10,10 +22,10 @@ namespace unittest {
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {} \
void TearDown() {}
class TestManager : public ::testing::Test {
public:
public:
TestManager() {}
virtual ~TestManager() {}
......@@ -22,9 +34,6 @@ public:
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#include "test_tool.h"
#include "test_message_op.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "predictor/unittest/test_message_op.h"
#include <string>
#include "framework/dag.h"
#include "framework/manager.h"
#include "framework/service.h"
#include "framework/dag.h"
#include "predictor/unittest/test_tool.h"
namespace baidu {
namespace paddle_serving {
......@@ -51,7 +66,6 @@ TEST_F(TestMSGOP, test_init) {
// Message OP can obtain data via message()
EXPECT_EQ(ab, chn->message());
}
}
}
}
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MESSAGE_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_MESSAGE_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <gtest/gtest.h>
#include "op/op.h"
#include "msg_data.pb.h"
#include "framework/channel.h"
#include "op/op.h"
#include "predictor/msg_data.pb.h"
namespace baidu {
namespace paddle_serving {
......@@ -12,8 +24,7 @@ namespace unittest {
class MsgOP : public baidu::paddle_serving::predictor::OpWithChannel<
pds::ut::OpMessageData> {
public:
public:
int inference() {
pds::ut::OpMessageData* msg = mutable_data<pds::ut::OpMessageData>();
msg->set_a(11);
......@@ -25,10 +36,10 @@ public:
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {} \
void TearDown() {}
class TestMSGOP : public ::testing::Test {
public:
public:
TestMSGOP() {}
virtual ~TestMSGOP() {}
......@@ -37,9 +48,6 @@ public:
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <butil/files/temp_file.h>
#include "framework/manager.h"
#include "framework/service.h"
#include <string>
#include <vector>
#include "framework/dag.h"
#include "framework/dag_view.h"
#include "framework/manager.h"
#include "framework/service.h"
#include "test_tool.h"
#include "test_op.h"
#include "test_message_op.h"
#include "predictor/unittest/test_message_op.h"
#include "predictor/unittest/test_op.h"
#include "predictor/unittest/test_tool.h"
namespace baidu {
namespace paddle_serving {
......@@ -63,30 +79,30 @@ TEST_F(TestOP, test_depend_argment) {
Dag dag;
AutoTempFile file(
"[@Node]\n\
name: node1\n\
type: ABOP\n\
[@Node]\n\
name: node2\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node3\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node4\n\
type: ABOP\n\
[.@Depend]\n\
name: node2\n\
mode: RW\n\
[.@Depend]\n\
name: node3\n\
mode: RO");
"[@Node]\n"
"name: node1\n"
"type: ABOP\n"
"[@Node]\n"
"name: node2\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node1\n"
"mode: RO\n"
"[@Node]\n"
"name: node3\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node1\n"
"mode: RO\n"
"[@Node]\n"
"name: node4\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node2\n"
"mode: RW\n"
"[.@Depend]\n"
"name: node3\n"
"mode: RO");
std::string dag_name = "DagTest";
EXPECT_EQ(0, dag.init("./", file.name(), dag_name));
......@@ -118,30 +134,30 @@ TEST_F(TestOP, test_inference) {
Dag dag;
AutoTempFile file(
"[@Node]\n\
name: node1\n\
type: ABOP\n\
[@Node]\n\
name: node2\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node3\n\
type: ABOP\n\
[.@Depend]\n\
name: node1\n\
mode: RO\n\
[@Node]\n\
name: node4\n\
type: ABOP\n\
[.@Depend]\n\
name: node2\n\
mode: RW\n\
[.@Depend]\n\
name: node3\n\
mode: RO");
"[@Node]\n"
"name: node1\n"
"type: ABOP\n"
"[@Node]\n"
"name: node2\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node1\n"
"mode: RO\n"
"[@Node]\n"
"name: node3\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node1\n"
"mode: RO\n"
"[@Node]\n"
"name: node4\n"
"type: ABOP\n"
"[.@Depend]\n"
"name: node2\n"
"mode: RW\n"
"[.@Depend]\n"
"name: node3\n"
"mode: RO");
std::string dag_name = "DagTest";
EXPECT_EQ(0, dag.init("./", file.name(), dag_name));
......@@ -202,22 +218,19 @@ TEST_F(TestOP, test_inference) {
EXPECT_FALSE(NULL == op4.mutable_depend_channel("node2"));
EXPECT_TRUE(NULL == op4.mutable_depend_channel("node3"));
const AB* dop1
= op4.get_depend_argument<AB>("node1");
const AB* dop1 = op4.get_depend_argument<AB>("node1");
const AB* dop21
= op4.get_depend_argument<AB>("node2");
const google::protobuf::Message* dop22
= op4.get_depend_channel("node2")->message();
const google::protobuf::Message* dop23
= op4.get_depend_argument<google::protobuf::Message>("node2");
const AB* dop21 = op4.get_depend_argument<AB>("node2");
const google::protobuf::Message* dop22 =
op4.get_depend_channel("node2")->message();
const google::protobuf::Message* dop23 =
op4.get_depend_argument<google::protobuf::Message>("node2");
const OpMessageData* dop31
= op4.get_depend_argument<OpMessageData>("node3");
const google::protobuf::Message* dop32
= op4.get_depend_channel("node3")->message();
const google::protobuf::Message* dop33
= op4.get_depend_argument<google::protobuf::Message>("node3");
const OpMessageData* dop31 = op4.get_depend_argument<OpMessageData>("node3");
const google::protobuf::Message* dop32 =
op4.get_depend_channel("node3")->message();
const google::protobuf::Message* dop33 =
op4.get_depend_argument<google::protobuf::Message>("node3");
EXPECT_EQ(NULL, dop1);
......@@ -251,7 +264,9 @@ TEST_F(TestOP, test_op_with_channel_and_conf) {
"[@Node]\n"
"name: %s\n"
"type: OpWithConf\n"
"name_in_conf: %s\n", op_name.c_str(), name_in_conf.c_str());
"name_in_conf: %s\n",
op_name.c_str(),
name_in_conf.c_str());
std::string dag_name = "DagTest";
ASSERT_EQ(0, dag.init("./", dag_conf.fname(), dag_name));
......@@ -270,16 +285,13 @@ TEST_F(TestOP, test_op_with_channel_and_conf) {
OpWithConf* op = dynamic_cast<OpWithConf*>(vnode->op);
ASSERT_NE(NULL, op);
EXPECT_STREQ(op->name(), op_name.c_str());
EXPECT_STREQ(
op->get_self_config()->name_in_conf.c_str(),
EXPECT_STREQ(op->get_self_config()->name_in_conf.c_str(),
name_in_conf.c_str());
EXPECT_STREQ(
op->mutable_data<OpOutput>()->name_for_output.c_str(),
EXPECT_STREQ(op->mutable_data<OpOutput>()->name_for_output.c_str(),
name_in_conf.c_str());
}
}
}
}
}
}
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <gtest/gtest.h>
#include "op/op.h"
#include <string>
#include "framework/channel.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
......@@ -31,7 +44,7 @@ struct AB {
};
class ABOP : public baidu::paddle_serving::predictor::OpWithChannel<AB> {
public:
public:
int inference() {
AB* ab = mutable_data<AB>();
ab->a = 1;
......@@ -53,9 +66,10 @@ struct OpOutput {
std::string ShortDebugString() const { return name_for_output; }
};
class OpWithConf : public baidu::paddle_serving::predictor::OpWithChannelAndConf<
OpOutput, OpConf> {
public:
class OpWithConf
: public baidu::paddle_serving::predictor::OpWithChannelAndConf<OpOutput,
OpConf> {
public:
DECLARE_OP(OpWithConf);
void* create_config(const comcfg::ConfigUnit& conf) {
OpConf* op_conf = new (std::nothrow) OpConf();
......@@ -82,10 +96,10 @@ DEFINE_OP(OpWithConf);
#ifndef DEFINE_UP_DOWN
#define DEFINE_UP_DOWN \
void SetUp() {} \
void TearDown() {} \
void TearDown() {}
class TestOP : public ::testing::Test {
public:
public:
TestOP() {}
virtual ~TestOP() {}
......@@ -94,9 +108,6 @@ public:
#undef DEFINE_UP_DOWN
#endif
}
}
}
#endif
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#include "test_server_manager.h" // TestServerManager
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "predictor/unittest/test_server_manager.h" // TestServerManager
#include <gflags/gflags.h> // FLAGS
#include <string>
#include "framework/server.h" // ServerManager
namespace baidu {
......
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <gtest/gtest.h>
namespace baidu {
......@@ -8,13 +20,11 @@ namespace paddle_serving {
namespace unittest {
class TestServerManager : public ::testing::Test {
public:
void SetUp() { }
void TearDown() { }
public:
void SetUp() {}
void TearDown() {}
};
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
#endif // BAIDU_PADDLE_SERVING_PREDICTOR_TEST_SERVER_MANAGER_H
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_TEST_TOOL_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_TEST_TOOL_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#pragma once
#include <fcntl.h>
#include<unistd.h>
#include <sys/time.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <string>
namespace baidu {
namespace paddle_serving {
namespace unittest {
class AutoTempFile {
public:
AutoTempFile(const char* content) {
public:
explicit AutoTempFile(const char* content) {
_need_del = false;
_name = generate_temp_name();
FILE* fd = fopen(_name.c_str(), "w");
if (!fd) {
return ;
return;
}
fprintf(fd, "%s", content);
fclose(fd);
......@@ -33,11 +46,9 @@ public:
}
}
const char* name() {
return _name.c_str();
}
const char* name() { return _name.c_str(); }
private:
private:
std::string generate_temp_name() {
timeval tv;
srand(time(0));
......@@ -46,20 +57,17 @@ private:
oss << "uttest_temp_";
oss << tv.tv_sec * 1000 + tv.tv_usec / 1000;
oss << "_";
oss << (int)getpid();
oss << static_cast<int>(getpid());
oss << "_";
oss << rand();
oss << rand_r();
oss << ".conf";
return oss.str();
}
private:
private:
std::string _name;
bool _need_del;
};
}
}
}
#endif
} // namespace unittest
} // namespace paddle_serving
} // namespace baidu
......@@ -96,4 +96,3 @@ install(TARGETS int64tensor_format
${PADDLE_SERVING_INSTALL_DIR}/demo/client/int64tensor_format/bin)
install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/conf DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/client/int64tensor_format/)
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "dense_service.pb.h"
#include "builtin_format.pb.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/dense_service.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
......@@ -28,8 +29,8 @@ using baidu::paddle_serving::predictor::dense_service::Response;
using baidu::paddle_serving::predictor::format::DensePrediction;
using baidu::paddle_serving::predictor::format::DenseInstance;
int create_req(Request& req) {
DenseInstance *ins = req.mutable_instances()->Add();
int create_req(Request& req) { // NOLINT
DenseInstance* ins = req.mutable_instances()->Add();
ins->add_features(1.5);
ins->add_features(16.0);
ins->add_features(14.0);
......@@ -41,14 +42,12 @@ int create_req(Request& req) {
return 0;
}
void print_res(
const Request& req,
void print_res(const Request& req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
for (uint32_t i = 0; i < res.predictions_size(); ++i) {
const DensePrediction &prediction = res.predictions(i);
const DensePrediction& prediction = res.predictions(i);
std::ostringstream oss;
for (uint32_t j = 0; j < prediction.categories_size(); ++j) {
oss << prediction.categories(j) << " ";
......@@ -56,9 +55,8 @@ void print_res(
LOG(INFO) << "Receive result " << oss.str();
}
LOG(INFO)
<< "Succ call predictor[dense_format], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
LOG(INFO) << "Succ call predictor[dense_format], the tag is: " << route_tag
<< ", elapse_ms: " << elapse_ms;
}
int main(int argc, char** argv) {
......@@ -109,8 +107,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
LOG(ERROR) << "failed call predictor with req:" << req.ShortDebugString();
return -1;
}
......@@ -121,14 +118,13 @@ int main(int argc, char** argv) {
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000) -
(start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "echo_service.pb.h"
#include "builtin_format.pb.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/echo_service.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
using baidu::paddle_serving::predictor::echo_service::RequestAndResponse;
int create_req(RequestAndResponse& req) {
int create_req(RequestAndResponse& req) { // NOLINT
req.set_a(1);
req.set_b(0.1);
return 0;
}
void print_res(
const RequestAndResponse& req,
void print_res(const RequestAndResponse& req,
const RequestAndResponse& res,
std::string route_tag,
uint64_t elapse_ms) {
LOG(INFO) << "Reqeive result: a = " << res.a() << ", b = " << res.b();
LOG(INFO)
<< "Succ call predictor[echo_service], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
LOG(INFO) << "Succ call predictor[echo_service], the tag is: " << route_tag
<< ", elapse_ms: " << elapse_ms;
}
int main(int argc, char** argv) {
......@@ -91,8 +90,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
LOG(ERROR) << "failed call predictor with req:" << req.ShortDebugString();
return -1;
}
......@@ -103,14 +101,13 @@ int main(int argc, char** argv) {
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000) -
(start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "int64tensor_service.pb.h"
#include "builtin_format.pb.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
#include "sdk-cpp/int64tensor_service.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
......@@ -28,8 +29,8 @@ using baidu::paddle_serving::predictor::int64tensor_service::Response;
using baidu::paddle_serving::predictor::format::Float32TensorPredictor;
using baidu::paddle_serving::predictor::format::Int64TensorInstance;
int create_req(Request& req) {
Int64TensorInstance *ins = req.mutable_instances()->Add();
int create_req(Request& req) { // NOLINT
Int64TensorInstance* ins = req.mutable_instances()->Add();
ins->add_data(1);
ins->add_data(2);
ins->add_data(3);
......@@ -48,14 +49,12 @@ int create_req(Request& req) {
return 0;
}
void print_res(
const Request& req,
void print_res(const Request& req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
for (uint32_t i = 0; i < res.predictions_size(); ++i) {
const Float32TensorPredictor &prediction = res.predictions(i);
const Float32TensorPredictor& prediction = res.predictions(i);
std::ostringstream oss1;
for (uint32_t j = 0; j < prediction.data_size(); ++j) {
oss1 << prediction.data(j) << " ";
......@@ -68,8 +67,7 @@ void print_res(
LOG(INFO) << "Receive result " << oss1.str() << ", shape " << oss2.str();
}
LOG(INFO)
<< "Succ call predictor[int64tensor_format], the tag is: "
LOG(INFO) << "Succ call predictor[int64tensor_format], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
}
......@@ -121,8 +119,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
LOG(ERROR) << "failed call predictor with req:" << req.ShortDebugString();
return -1;
}
......@@ -133,14 +130,13 @@ int main(int argc, char** argv) {
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000) -
(start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "sparse_service.pb.h"
#include "builtin_format.pb.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
#include "sdk-cpp/sparse_service.pb.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
......@@ -28,8 +29,8 @@ using baidu::paddle_serving::predictor::sparse_service::Response;
using baidu::paddle_serving::predictor::format::SparsePrediction;
using baidu::paddle_serving::predictor::format::SparseInstance;
int create_req(Request& req) {
SparseInstance *ins = req.mutable_instances()->Add();
int create_req(Request& req) { // NOLINT
SparseInstance* ins = req.mutable_instances()->Add();
ins->add_keys(26);
ins->add_keys(182);
ins->add_keys(232);
......@@ -51,14 +52,12 @@ int create_req(Request& req) {
return 0;
}
void print_res(
const Request& req,
void print_res(const Request& req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
for (uint32_t i = 0; i < res.predictions_size(); ++i) {
const SparsePrediction &prediction = res.predictions(i);
const SparsePrediction& prediction = res.predictions(i);
std::ostringstream oss;
for (uint32_t j = 0; j < prediction.categories_size(); ++j) {
oss << prediction.categories(j) << " ";
......@@ -66,9 +65,8 @@ void print_res(
LOG(INFO) << "Receive result " << oss.str();
}
LOG(INFO)
<< "Succ call predictor[sparse_format], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
LOG(INFO) << "Succ call predictor[sparse_format], the tag is: " << route_tag
<< ", elapse_ms: " << elapse_ms;
}
int main(int argc, char** argv) {
......@@ -119,8 +117,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
LOG(ERROR) << "failed call predictor with req:" << req.ShortDebugString();
return -1;
}
......@@ -131,14 +128,13 @@ int main(int argc, char** argv) {
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000) -
(start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file demo.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:12:44
* @brief
*
**/
#include <sys/types.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include <fstream>
#include "predictor_sdk.h"
#include "image_class.pb.h"
#include "builtin_format.pb.h"
#include "sdk-cpp/builtin_format.pb.h"
#include "sdk-cpp/image_class.pb.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/predictor_sdk.h"
using baidu::paddle_serving::sdk_cpp::Predictor;
using baidu::paddle_serving::sdk_cpp::PredictorApi;
......@@ -28,7 +29,7 @@ using baidu::paddle_serving::predictor::format::DensePrediction;
using baidu::paddle_serving::predictor::image_classification::Request;
using baidu::paddle_serving::predictor::image_classification::Response;
int create_req(Request& req) {
int create_req(Request& req) { // NOLINT
static const char* TEST_IMAGE_PATH = "./data/images/what.jpg";
FILE* fp = fopen(TEST_IMAGE_PATH, "rb");
......@@ -39,7 +40,7 @@ int create_req(Request& req) {
fseek(fp, 0L, SEEK_END);
size_t isize = ftell(fp);
char* ibuf = new(std::nothrow) char[isize];
char* ibuf = new (std::nothrow) char[isize];
if (!ibuf) {
LOG(ERROR) << "Failed malloc image buffer";
fclose(fp);
......@@ -65,14 +66,11 @@ int create_req(Request& req) {
return 0;
}
void print_res(
const Request& req,
void print_res(const Request& req,
const Response& res,
std::string route_tag,
uint64_t elapse_ms) {
static const char* GT_TEXT_PATH
= "./data/images/groundtruth.txt";
static const char* GT_TEXT_PATH = "./data/images/groundtruth.txt";
std::vector<std::string> gt_labels;
std::ifstream file(GT_TEXT_PATH);
......@@ -91,7 +89,7 @@ void print_res(
butil::IOBufAsZeroCopyInputStream wrapper(buf);
if (!json2pb::JsonToProtoMessage(&wrapper, &json_msg, &err_string)) {
LOG(ERROR) << "Failed parse json from str:" << json;
return ;
return;
}
uint32_t csize = json_msg.categories_size();
......@@ -109,13 +107,12 @@ void print_res(
}
}
LOG(INFO) << "sample-" << si << "'s classify result: "
<< gt_labels[max_idx] << ", prop: " << max_prop;
LOG(INFO) << "sample-" << si << "'s classify result: " << gt_labels[max_idx]
<< ", prop: " << max_prop;
}
LOG(INFO)
<< "Succ call predictor[ximage], the tag is: "
<< route_tag << ", elapse_ms: " << elapse_ms;
LOG(INFO) << "Succ call predictor[ximage], the tag is: " << route_tag
<< ", elapse_ms: " << elapse_ms;
}
int main(int argc, char** argv) {
......@@ -166,8 +163,7 @@ int main(int argc, char** argv) {
butil::IOBufBuilder debug_os;
if (predictor->debug(&req, &res, &debug_os) != 0) {
LOG(ERROR) << "failed call predictor with req:"
<< req.ShortDebugString();
LOG(ERROR) << "failed call predictor with req:" << req.ShortDebugString();
return -1;
}
......@@ -178,14 +174,13 @@ int main(int argc, char** argv) {
timeval end;
gettimeofday(&end, NULL);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000)
- (start.tv_sec * 1000 + start.tv_usec / 1000);
uint64_t elapse_ms = (end.tv_sec * 1000 + end.tv_usec / 1000) -
(start.tv_sec * 1000 + start.tv_usec / 1000);
print_res(req, res, predictor->tag(), elapse_ms);
res.Clear();
usleep(50);
} // while (true)
api.thrd_finalize();
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file abtest.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/06 17:11:38
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
#include "stub.h"
#include "common.h"
#include "factory.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <google/protobuf/message.h>
#include <string>
#include <vector>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/factory.h"
#include "sdk-cpp/include/stub.h"
namespace baidu {
namespace paddle_serving {
......@@ -27,26 +27,23 @@ namespace sdk_cpp {
class Stub;
class Variant;
static const std::string WEIGHT_SEPERATOR = "|";
static const char* WEIGHT_SEPERATOR = "|";
class EndpointRouterBase {
public:
public:
typedef std::vector<Variant*> VariantList;
virtual ~EndpointRouterBase() {}
virtual int initialize(
const google::protobuf::Message& conf) = 0;
virtual int initialize(const google::protobuf::Message& conf) = 0;
virtual Variant* route(const VariantList&) = 0;
virtual Variant* route(
const VariantList&,
const void*) = 0;
virtual Variant* route(const VariantList&, const void*) = 0;
};
class WeightedRandomRender : public EndpointRouterBase {
public:
public:
static int register_self() {
INLINE_REGIST_OBJECT(WeightedRandomRender, EndpointRouterBase, -1);
return 0;
......@@ -56,24 +53,17 @@ public:
~WeightedRandomRender() {}
int initialize(
const google::protobuf::Message& conf);
int initialize(const google::protobuf::Message& conf);
Variant* route(const VariantList&);
Variant* route(
const VariantList&,
const void*);
Variant* route(const VariantList&, const void*);
private:
private:
std::vector<uint32_t> _variant_weight_list;
uint32_t _normalized_sum;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_ABTEST_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file common.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 20:24:19
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
#define BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <error.h>
#include <getopt.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <strings.h>
#include <getopt.h>
#include <sys/types.h>
#include <unistd.h>
#include <exception>
#include <google/protobuf/message.h>
#include <boost/unordered_map.hpp>
#include <gflags/gflags.h>
#include <bvar/bvar.h>
#include <butil/logging.h>
#include <butil/time.h>
#include <butil/object_pool.h>
#include <brpc/channel.h>
#include <brpc/parallel_channel.h>
#include <brpc/traceprintf.h>
#include <bthread/bthread.h>
#include <error.h>
#include <json2pb/json_to_pb.h>
#include "sdk_configure.pb.h"
#include "configure_parser.h"
#include "boost/unordered_map.hpp"
#include "gflags/gflags.h"
#include "google/protobuf/message.h"
#include "utils.h"
#include "brpc/channel.h"
#include "brpc/parallel_channel.h"
#include "brpc/traceprintf.h"
#include "bthread/bthread.h"
#include "butil/logging.h"
#include "butil/object_pool.h"
#include "butil/time.h"
#include "bvar/bvar.h"
#include "json2pb/json_to_pb.h"
#endif //BAIDU_PADDLE_SERVING_CPP_SDK_COMMON_H
#include "configure/include/configure_parser.h"
#include "configure/sdk_configure.pb.h"
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#include "sdk-cpp/include/utils.h"
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file config_manager.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 15:28:43
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
#include "common.h"
#include "endpoint_config.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/endpoint_config.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class EndpointConfigManager {
public:
public:
static EndpointConfigManager& instance() {
static EndpointConfigManager singleton;
return singleton;
}
EndpointConfigManager()
: _last_update_timestamp(0),
_current_endpointmap_id(1) {}
: _last_update_timestamp(0), _current_endpointmap_id(1) {}
int create(const char* path, const char* file);
int load();
bool need_reload() {
return false;
}
bool need_reload() { return false; }
int reload() {
if (!need_reload()) {
......@@ -52,33 +47,25 @@ public:
return load();
}
const std::map<std::string, EndpointInfo>& config() {
return _ep_map;
}
const std::map<std::string, EndpointInfo>& config() { return _ep_map; }
const std::map<std::string, EndpointInfo>& config() const {
return _ep_map;
}
const std::map<std::string, EndpointInfo>& config() const { return _ep_map; }
private:
int init_one_variant(
const configure::VariantConf& conf,
VariantInfo& var);
private:
int init_one_variant(const configure::VariantConf& conf,
VariantInfo& var); // NOLINT
int init_one_endpoint(
const configure::Predictor& conf,
EndpointInfo& ep,
int init_one_endpoint(const configure::Predictor& conf,
EndpointInfo& ep, // NOLINT
const VariantInfo& default_var);
int merge_variant(
const VariantInfo& default_var,
int merge_variant(const VariantInfo& default_var,
const configure::VariantConf& conf,
VariantInfo& merged_var);
VariantInfo& merged_var); // NOLINT
int parse_tag_values(
SplitParameters& split);
int parse_tag_values(SplitParameters& split); // NOLINT
private:
private:
std::map<std::string, EndpointInfo> _ep_map;
std::string _endpoint_config_path;
std::string _endpoint_config_file;
......@@ -86,10 +73,6 @@ private:
uint32_t _current_endpointmap_id;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_CONFIG_MANAGER_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/resource.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/07/06 17:06:25
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_ENDPOINT_H
#define BAIDU_PADDLE_SERVING_SDK_ENDPOINT_H
#include "common.h"
#include "endpoint_config.h"
#include "stub.h"
#include "variant.h"
#include "predictor.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/endpoint_config.h"
#include "sdk-cpp/include/predictor.h"
#include "sdk-cpp/include/stub.h"
#include "sdk-cpp/include/variant.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Endpoint {
friend class EndpointRouterBase;
public:
friend class EndpointRouterBase;
public:
virtual ~Endpoint() {}
Endpoint() {
_variant_list.clear();
}
Endpoint() { _variant_list.clear(); }
int initialize(const EndpointInfo& ep_info);
......@@ -49,26 +48,19 @@ public:
int ret_predictor(Predictor* predictor);
const std::string& endpoint_name() const {
return _endpoint_name;
}
const std::string& endpoint_name() const { return _endpoint_name; }
private:
int initialize_variant(
const VariantInfo& var_info,
private:
int initialize_variant(const VariantInfo& var_info,
const std::string& service,
const std::string& ep_name,
std::vector<Stub*>& stubs);
std::vector<Stub*>& stubs); // NOLINT
private:
private:
std::string _endpoint_name;
std::vector<Variant*> _variant_list;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPPRESOURCE_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint_config.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/12 15:17:56
* @brief
*
**/
#include "common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
#include <string>
#include <vector>
#include "sdk-cpp/include/common.h"
namespace baidu {
namespace paddle_serving {
......@@ -28,30 +26,31 @@ namespace sdk_cpp {
do { \
if (conf.has_##name()) { \
item.set(conf.name()); \
} \
else { \
LOG(ERROR) << "Not found key in configue: " << #name;\
} else { \
LOG(ERROR) << "Not found key in configue: " << #name; \
} \
} while (0)
#define ASSIGN_CONF_ITEM(dest, src, fail) \
do { \
if (!src.init) { \
LOG(ERROR) << "Cannot assign an unintialized item: " \
<< #src << " to dest: " << #dest; \
LOG(ERROR) << "Cannot assign an unintialized item: " << #src \
<< " to dest: " << #dest; \
return fail; \
} \
dest = src.value; \
} while (0)
template<typename T> struct type_traits {
template <typename T>
struct type_traits {
static type_traits<T> tag;
};
template<typename T>
template <typename T>
type_traits<T> type_traits<T>::tag;
template<typename T> struct ConfigItem {
template <typename T>
struct ConfigItem {
T value;
bool init;
ConfigItem() : init(false) {}
......@@ -107,10 +106,6 @@ struct EndpointInfo {
void* ab_test;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_ENDPOINT_CONFIG_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/factory.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/10 22:09:57
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
#include "common.h"
#include "stub_impl.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include <utility>
#include "glog/raw_logging.h"
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/stub_impl.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
#define INLINE_REGIST_OBJECT(D, B, E) \
do { \
Factory<D, B>* factory = \
new (std::nothrow) Factory<D, B>(); \
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
do { \
Factory<D, B>* factory = new (std::nothrow) Factory<D, B>(); \
if (factory == NULL || \
FactoryPool<B>::instance().register_factory(#D, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B); \
return E; \
} \
} while (0)
} while (0)
#define DECLARE_FACTORY_OBJECT(D, B) \
static int regist(const std::string& tag) { \
Factory<D, B>* factory = \
new (std::nothrow) Factory<D, B>();\
if (factory == NULL \
|| FactoryPool<B>::instance().register_factory(\
tag, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s in macro!", #D);\
Factory<D, B>* factory = new (std::nothrow) Factory<D, B>(); \
if (factory == NULL || \
FactoryPool<B>::instance().register_factory(tag, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s in macro!", #D); \
return -1; \
} \
return 0; \
}
#define PDS_STR_CAT(a, b) PDS_STR_CAT_I(a, b)
#define PDS_STR_CAT_I(a, b) a ## b
#define PDS_STR_CAT_I(a, b) a##b
#define DEFINE_FACTORY_OBJECT(D) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
D::regist(#D); \
}
}
#define REGIST_FACTORY_OBJECT_IMPL(D, B) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance().register_factory(\
#D, factory) != 0) { \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory = \
new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory<D, B>(); \
if (factory == NULL || \
::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance() \
.register_factory(#D, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s->%s in macro!", #D, #B); \
return ; \
return; \
} \
return ; \
}
#define REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(D, B, T)\
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory =\
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory<D, B>();\
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance().register_factory(\
T, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T); \
return ; \
return; \
}
#define REGIST_FACTORY_OBJECT_IMPL_WITH_TAG(D, B, T) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
::baidu::paddle_serving::sdk_cpp::Factory<D, B>* factory = \
new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory<D, B>(); \
if (factory == NULL || \
::baidu::paddle_serving::sdk_cpp::FactoryPool<B>::instance() \
.register_factory(T, factory) != 0) { \
RAW_LOG_ERROR( \
"Failed regist factory: %s->%s, tag %s in macro!", #D, #B, T); \
return; \
} \
return ; \
}
return; \
}
#define REGIST_ABTEST_OBJECT(D) \
REGIST_FACTORY_OBJECT_IMPL( \
D, \
::baidu::paddle_serving::sdk_cpp::ABTestRouterBase)
D, ::baidu::paddle_serving::sdk_cpp::ABTestRouterBase)
#define REGIST_ABTEST_OBJECT_WITH_TAG(D, T) \
REGIST_FACTORY_OBJECT_IMPL_WITH_TAG( \
D, \
::baidu::paddle_serving::sdk_cpp::ABTestRouterBase,\
T)
D, ::baidu::paddle_serving::sdk_cpp::ABTestRouterBase, T)
#define REGIST_STUB_OBJECT_WITH_TAG(D, C, R, I, O, T) \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, __LINE__)(void) \
{ \
__attribute__((constructor)) static void PDS_STR_CAT(GlobalRegistObject, \
__LINE__)(void) { \
RAW_LOG_INFO("REGIST_STUB_OBJECT_WITH_TAG"); \
::baidu::paddle_serving::sdk_cpp::Factory< \
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>,\
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>, \
::baidu::paddle_serving::sdk_cpp::Stub>* factory = \
new (::std::nothrow) ::baidu::paddle_serving::sdk_cpp::Factory< \
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>,\
new (::std::nothrow)::baidu::paddle_serving::sdk_cpp::Factory< \
::baidu::paddle_serving::sdk_cpp::StubImpl<D, C, R, I, O>, \
::baidu::paddle_serving::sdk_cpp::Stub>(); \
if (factory == NULL \
|| ::baidu::paddle_serving::sdk_cpp::FactoryPool< \
::baidu::paddle_serving::sdk_cpp::Stub>::instance().register_factory(\
T, factory) != 0) { \
RAW_LOG_ERROR("Failed regist factory: %s->Stub, tag: %s in macro!", #D, T); \
return ; \
if (factory == NULL || \
::baidu::paddle_serving::sdk_cpp::FactoryPool< \
::baidu::paddle_serving::sdk_cpp::Stub>::instance() \
.register_factory(T, factory) != 0) { \
RAW_LOG_ERROR( \
"Failed regist factory: %s->Stub, tag: %s in macro!", #D, T); \
return; \
} \
return ; \
}
return; \
}
class Stub;
class EndpointRouterBase;
class VariantRouterBase;
template<typename B>
template <typename B>
class FactoryBase {
public:
public:
virtual B* gen() = 0;
virtual void del(B* obj) = 0;
};
template<typename D, typename B>
template <typename D, typename B>
class Factory : public FactoryBase<B> {
public:
B* gen() {
return new(std::nothrow) D();
}
public:
B* gen() { return new (std::nothrow) D(); }
void del(B* obj) {
delete dynamic_cast<D*>(obj);
}
void del(B* obj) { delete dynamic_cast<D*>(obj); }
};
template<typename B>
template <typename B>
class FactoryPool {
public:
public:
static FactoryPool<B>& instance() {
static FactoryPool<B> singleton;
return singleton;
}
int register_factory(const std::string& tag,
FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
int register_factory(const std::string& tag, FactoryBase<B>* factory) {
typename std::map<std::string, FactoryBase<B>*>::iterator it =
_pool.find(tag);
if (it != _pool.end()) {
RAW_LOG_ERROR("Insert duplicate with tag: %s", tag.c_str());
return -1;
}
std::pair<
typename std::map<std::string, FactoryBase<B>*>::iterator,
bool> r = _pool.insert(std::make_pair(tag, factory));
std::pair<typename std::map<std::string, FactoryBase<B>*>::iterator, bool>
r = _pool.insert(std::make_pair(tag, factory));
if (!r.second) {
RAW_LOG_ERROR("Failed insert new factory with: %s", tag.c_str());
return -1;
}
RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s", tag.c_str(), typeid(B).name());
RAW_LOG_INFO("Succ insert one factory, tag: %s, base type %s",
tag.c_str(),
typeid(B).name());
return 0;
}
B* generate_object(const std::string& tag) {
typename std::map<std::string, FactoryBase<B>*>::iterator it
= _pool.find(tag);
typename std::map<std::string, FactoryBase<B>*>::iterator it =
_pool.find(tag);
if (it == _pool.end() || it->second == NULL) {
RAW_LOG_ERROR("Not found factory pool, tag: %s, pool size: %u", tag.c_str(), _pool.size());
RAW_LOG_ERROR("Not found factory pool, tag: %s, pool size: %u",
tag.c_str(),
_pool.size());
return NULL;
}
return it->second->gen();
}
template<typename D>
template <typename D>
void return_object(B* object) {
Factory<D, B> factory;
factory->del(object);
}
private:
private:
std::map<std::string, FactoryBase<B>*> _pool;
};
......@@ -196,10 +190,6 @@ typedef FactoryPool<brpc::ResponseMerger> ResponseMergerFactory;
typedef FactoryPool<EndpointRouterBase> EndpointRouterFactory;
typedef FactoryPool<VariantRouterBase> VariantRouterFactory;
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_FACTORY_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/predictor.h
* @author wanlijin01(com@baidu.com)
* @date 2018/07/05 16:53:43
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
#include "stub.h"
#include "common.h"
#include "endpoint_config.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/endpoint_config.h"
#include "sdk-cpp/include/stub.h"
namespace baidu {
namespace paddle_serving {
......@@ -28,38 +28,39 @@ namespace sdk_cpp {
param = butil::get_object<T>(); \
if (!param) { \
LOG(ERROR) << "Failed get object from pool" \
<< ", arg:" << #param << "type: " \
<< #T; \
<< ", arg:" << #param << "type: " << #T; \
return err; \
} \
} while (0)
static const brpc::CompressType compress_types[] = {
brpc::COMPRESS_TYPE_NONE,
static const brpc::CompressType compress_types[] = {brpc::COMPRESS_TYPE_NONE,
brpc::COMPRESS_TYPE_SNAPPY,
brpc::COMPRESS_TYPE_GZIP,
brpc::COMPRESS_TYPE_ZLIB,
brpc::COMPRESS_TYPE_LZ4};
typedef void (*DoneType) (google::protobuf::Message* res,
typedef void (*DoneType)(google::protobuf::Message* res,
brpc::Controller* controller);
template<typename Arg1, typename Arg2>
template <typename Arg1, typename Arg2>
class FunctionClosure : public ::google::protobuf::Closure {
public:
public:
typedef void (*FunctionType)(Arg1* arg1, Arg2* arg2);
FunctionClosure() {}
~FunctionClosure() {}
int init(FunctionType function, bool self_deleting,
bool arg1_deleting, bool arg2_deleting,
Arg1* arg1 = NULL, Arg2* arg2 = NULL);
int init(FunctionType function,
bool self_deleting,
bool arg1_deleting,
bool arg2_deleting,
Arg1* arg1 = NULL,
Arg2* arg2 = NULL);
void Run();
private:
private:
FunctionType _function;
Arg1* _arg1;
Arg2* _arg2;
......@@ -69,51 +70,51 @@ private:
};
class InterfaceAdaptor {
public:
public:
typedef google::protobuf::Message RequestMessage;
typedef google::protobuf::Message ResponseMessage;
virtual int partition(RequestMessage& request, std::vector<RequestMessage*>& out) = 0;
virtual int merge(std::vector<ResponseMessage*>& response, ResponseMessage& out) = 0;
virtual int partition(RequestMessage& request, // NOLINT
std::vector<RequestMessage*>& out) = 0; // NOLINT
virtual int merge(std::vector<ResponseMessage*>& response, // NOLINT
ResponseMessage& out) = 0; // NOLINT
};
class EchoAdaptor : public InterfaceAdaptor {
public:
public:
typedef google::protobuf::Message RequestMessage;
typedef google::protobuf::Message ResponseMessage;
int partition(RequestMessage& request, std::vector<RequestMessage*>& out) {
int partition(RequestMessage& request, // NOLINT
std::vector<RequestMessage*>& out) { // NOLINT
return 0;
}
int merge(std::vector<ResponseMessage*>& response, ResponseMessage*& out) {
int merge(std::vector<ResponseMessage*>& response, // NOLINT
ResponseMessage*& out) { // NOLINT
return 0;
}
};
class Predictor {
public:
public:
// synchronize interface
virtual int inference(
google::protobuf::Message* req,
virtual int inference(google::protobuf::Message* req,
google::protobuf::Message* res) = 0;
// asynchronize interface
virtual int inference(
google::protobuf::Message* req,
virtual int inference(google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid = NULL) = 0;
// synchronize interface
virtual int debug(
google::protobuf::Message* req,
virtual int debug(google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os) = 0;
// un-blocked interface
virtual int send_inference(
google::protobuf::Message* req,
virtual int send_inference(google::protobuf::Message* req,
google::protobuf::Message* res) = 0;
virtual int recv_inference() = 0;
virtual void cancel_inference() = 0;
......@@ -131,21 +132,24 @@ public:
virtual bool is_inited() = 0;
};
template<typename T>
template <typename T>
class PredictorImpl : public Predictor {
public:
public:
typedef google::protobuf::MethodDescriptor MethodDescriptor;
PredictorImpl() : _service(NULL), _stub(NULL), _infer(NULL),
_debug(NULL), _channel(NULL), _inited(false) {
PredictorImpl()
: _service(NULL),
_stub(NULL),
_infer(NULL),
_debug(NULL),
_channel(NULL),
_inited(false) {
// _inferid = 0;
}
~PredictorImpl() {}
int init(
google::protobuf::RpcChannel* chnl,
int init(google::protobuf::RpcChannel* chnl,
T* service,
const MethodDescriptor* infer,
const MethodDescriptor* debug,
......@@ -153,37 +157,28 @@ public:
Stub* stub,
const std::string& tag);
int reset(
const RpcParameters& options,
brpc::Controller& cntl);
int reset(const RpcParameters& options, brpc::Controller& cntl); // NOLINT
int deinit();
bool is_inited() {
return _inited;
}
bool is_inited() { return _inited; }
// 同步接口
int inference(
google::protobuf::Message* req,
google::protobuf::Message* res);
int inference(google::protobuf::Message* req, google::protobuf::Message* res);
// 异步接口
int inference(
google::protobuf::Message* req,
int inference(google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid = NULL);
// Debug同步接口
int debug(
google::protobuf::Message* req,
int debug(google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os);
// 半同步(非阻塞)接口
int send_inference(
google::protobuf::Message* req,
int send_inference(google::protobuf::Message* req,
google::protobuf::Message* res);
// 半同步(非阻塞)接口
......@@ -194,23 +189,15 @@ public:
const char* tag();
// Underlying protobuf service object used to issue RPC calls.
const google::protobuf::Service* service() { return _service; }
// brpc controller holding per-call state (errors, compress type, call id).
const brpc::Controller* controller() { return &_cntl; }
// RPC channel this predictor sends requests over.
const google::protobuf::RpcChannel* channel() { return _channel; }
// Owning stub that created this predictor (used for metrics and pooling).
const Stub* stub() { return _stub; }
private:
private:
T* _service;
Stub* _stub;
const MethodDescriptor* _infer;
......@@ -223,12 +210,8 @@ private:
bool _inited;
};
} // sdk_cpp
} // paddle_serving
} // baidu
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
#include "predictor.hpp"
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_HPP
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_HPP
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class MetricScope;
class Stub;
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
class StubImpl;
template<typename Arg1, typename Arg2>
inline ::google::protobuf::Closure* NewClosure(
void (*function)(Arg1*, Arg2*),
Arg1* arg1 = NULL, Arg2* arg2 = NULL) {
FunctionClosure<Arg1, Arg2>* closure = butil::get_object<
FunctionClosure<Arg1, Arg2> >();
template <typename Arg1, typename Arg2>
inline ::google::protobuf::Closure* NewClosure(void (*function)(Arg1*, Arg2*),
Arg1* arg1 = NULL,
Arg2* arg2 = NULL) {
FunctionClosure<Arg1, Arg2>* closure =
butil::get_object<FunctionClosure<Arg1, Arg2>>();
if (closure) {
if (closure->init(function, true, false, true,
arg1, arg2) != 0) {
if (closure->init(function, true, false, true, arg1, arg2) != 0) {
LOG(FATAL) << "Failed create closure objects";
return NULL;
}
......@@ -29,11 +40,13 @@ inline ::google::protobuf::Closure* NewClosure(
return closure;
}
template<typename Arg1, typename Arg2>
int FunctionClosure<Arg1, Arg2>::init(
FunctionType function, bool self_deleting,
bool arg1_deleting, bool arg2_deleting,
Arg1* arg1, Arg2* arg2) {
template <typename Arg1, typename Arg2>
int FunctionClosure<Arg1, Arg2>::init(FunctionType function,
bool self_deleting,
bool arg1_deleting,
bool arg2_deleting,
Arg1* arg1,
Arg2* arg2) {
_function = function;
_self_deleting = self_deleting;
_arg1_deleting = arg1_deleting;
......@@ -47,7 +60,7 @@ int FunctionClosure<Arg1, Arg2>::init(
return 0;
}
template<typename Arg1, typename Arg2>
template <typename Arg1, typename Arg2>
void FunctionClosure<Arg1, Arg2>::Run() {
bool self_delete = _self_deleting;
bool arg1_delete = _arg1_deleting;
......@@ -64,8 +77,8 @@ void FunctionClosure<Arg1, Arg2>::Run() {
}
}
template<typename T> int PredictorImpl<T>::init(
google::protobuf::RpcChannel* chnl,
template <typename T>
int PredictorImpl<T>::init(google::protobuf::RpcChannel* chnl,
T* service,
const MethodDescriptor* infer,
const MethodDescriptor* debug,
......@@ -86,40 +99,38 @@ template<typename T> int PredictorImpl<T>::init(
return 0;
}
template<typename T> int PredictorImpl<T>::reset(
const RpcParameters& options,
brpc::Controller& cntl) {
template <typename T>
int PredictorImpl<T>::reset(const RpcParameters& options,
brpc::Controller& cntl) { // NOLINT
cntl.Reset();
if (options.compress_type.init) {
cntl.set_request_compress_type(
compress_types[options.compress_type.value]);
cntl.set_request_compress_type(compress_types[options.compress_type.value]);
}
return 0;
}
// Mark the predictor as uninitialized so it can be returned to the object
// pool; no resources are owned here, so nothing else needs releasing.
template <typename T>
int PredictorImpl<T>::deinit() {
  // do nothing
  _inited = false;
  return 0;
}
template<typename T> int PredictorImpl<T>::inference(
google::protobuf::Message* req,
template <typename T>
int PredictorImpl<T>::inference(google::protobuf::Message* req,
google::protobuf::Message* res) {
MetricScope metric(_stub, "infer_sync");
_service->CallMethod(_infer, &_cntl, req, res, NULL);
if (_cntl.Failed()) {
LOG(WARNING)
<< "inference call failed, message: "
<< _cntl.ErrorText();
LOG(WARNING) << "inference call failed, message: " << _cntl.ErrorText();
_stub->update_average(1, "failure");
return -1;
}
return 0;
}
template<typename T> int PredictorImpl<T>::inference(
google::protobuf::Message* req,
template <typename T>
int PredictorImpl<T>::inference(google::protobuf::Message* req,
google::protobuf::Message* res,
DoneType done,
brpc::CallId* cid) {
......@@ -128,8 +139,7 @@ template<typename T> int PredictorImpl<T>::inference(
// 在对象池临时申请一个独立的对象,且直到异步回调执行完
// 成后才能释放,而该释放行为被NewClosure自动托管,用户
// 无需关注。
brpc::Controller* cntl
= butil::get_object<brpc::Controller>();
brpc::Controller* cntl = butil::get_object<brpc::Controller>();
if (!cntl || reset(_options, *cntl) != 0) {
LOG(FATAL) << "Failed get controller from object pool,"
<< "cntl is null: " << (cntl == NULL);
......@@ -141,21 +151,18 @@ template<typename T> int PredictorImpl<T>::inference(
*cid = cntl->call_id();
}
_service->CallMethod(_infer, cntl, req, res, NewClosure(
done, res, cntl));
_service->CallMethod(_infer, cntl, req, res, NewClosure(done, res, cntl));
return 0;
}
template<typename T> int PredictorImpl<T>::debug(
google::protobuf::Message* req,
template <typename T>
int PredictorImpl<T>::debug(google::protobuf::Message* req,
google::protobuf::Message* res,
butil::IOBufBuilder* debug_os) {
MetricScope metric(_stub, "debug");
_service->CallMethod(_debug, &_cntl, req, res, NULL);
if (_cntl.Failed()) {
LOG(WARNING)
<< "inference call failed, message: "
<< _cntl.ErrorText();
LOG(WARNING) << "inference call failed, message: " << _cntl.ErrorText();
_stub->update_average(1, "failure");
return -1;
}
......@@ -165,17 +172,17 @@ template<typename T> int PredictorImpl<T>::debug(
return 0;
}
template<typename T> int PredictorImpl<T>::send_inference(
google::protobuf::Message* req,
template <typename T>
int PredictorImpl<T>::send_inference(google::protobuf::Message* req,
google::protobuf::Message* res) {
MetricScope metric(_stub, "infer_send");
_inferid = _cntl.call_id();
_service->CallMethod(
_infer, &_cntl, req, res, brpc::DoNothing());
_service->CallMethod(_infer, &_cntl, req, res, brpc::DoNothing());
return 0;
}
template<typename T> int PredictorImpl<T>::recv_inference() {
template <typename T>
int PredictorImpl<T>::recv_inference() {
// waiting for callback done
MetricScope metric(_stub, "infer_recv");
brpc::Join(_inferid);
......@@ -188,17 +195,17 @@ template<typename T> int PredictorImpl<T>::recv_inference() {
return 0;
}
// Cancel the in-flight half-sync inference identified by _inferid.
// NOTE(review): brpc::StartCancel is asynchronous — the RPC is only
// guaranteed finished once the corresponding Join/callback completes.
template <typename T>
void PredictorImpl<T>::cancel_inference() {
  MetricScope metric(_stub, "infer_cancel");
  brpc::StartCancel(_inferid);
}
// Tag string this predictor was initialized with (see init()).
// The returned pointer stays valid only while the predictor is alive.
template <typename T>
const char* PredictorImpl<T>::tag() {
  return _tag.c_str();
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTORR_HPP
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/predictor_api.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:33:59
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
#include "stub.h"
#include "predictor.h"
#include "endpoint_config.h"
#include "endpoint.h"
#include "config_manager.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include "sdk-cpp/include/config_manager.h"
#include "sdk-cpp/include/endpoint.h"
#include "sdk-cpp/include/endpoint_config.h"
#include "sdk-cpp/include/predictor.h"
#include "sdk-cpp/include/stub.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class PredictorApi {
public:
public:
PredictorApi() {}
int register_all();
......@@ -47,8 +47,7 @@ public:
}
Predictor* fetch_predictor(std::string ep_name) {
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
std::map<std::string, Endpoint*>::iterator it = _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(ERROR) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
......@@ -57,10 +56,8 @@ public:
return it->second->get_predictor();
}
Predictor* fetch_predictor(std::string ep_name,
const void* params) {
std::map<std::string, Endpoint*>::iterator it
= _endpoints.find(ep_name);
Predictor* fetch_predictor(std::string ep_name, const void* params) {
std::map<std::string, Endpoint*>::iterator it = _endpoints.find(ep_name);
if (it == _endpoints.end() || !it->second) {
LOG(ERROR) << "Failed fetch predictor:"
<< ", ep_name: " << ep_name;
......@@ -79,15 +76,11 @@ public:
return 0;
}
private:
private:
EndpointConfigManager _config_manager;
std::map<std::string, Endpoint*> _endpoints;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_PREDICTOR_SDK_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/stub.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/12/04 16:42:29
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
#include "common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include "sdk-cpp/include/common.h"
namespace baidu {
namespace paddle_serving {
......@@ -25,13 +24,15 @@ class Predictor;
struct VariantInfo;
class Stub {
public:
public:
typedef google::protobuf::Message Message;
virtual ~Stub() {}
virtual int initialize(const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value) = 0;
virtual int initialize(const VariantInfo& var,
const std::string& ep,
const std::string* tag,
const std::string* tag_value) = 0;
// predictor
virtual Predictor* fetch_predictor() = 0;
......@@ -61,10 +62,6 @@ public:
virtual void update_latency(int64_t acc, const char* name) = 0;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/stub.h
* @author wanlijin(wanlijin01@baidu.com)
* @date 2018/07/04 16:42:29
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
#include "common.h"
#include "predictor.h"
#include "stub.h"
#include "endpoint_config.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/endpoint_config.h"
#include "sdk-cpp/include/predictor.h"
#include "sdk-cpp/include/stub.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
static const std::string AVG_PREFIX = "avg_";
static const std::string LTC_PREFIX = "ltc_";
static const char* AVG_PREFIX = "avg_";
static const char* LTC_PREFIX = "ltc_";
class Predictor;
template<typename T>
template <typename T>
class PredictorImpl;
static const char* INFERENCE_METHOD_NAME = "inference";
static const char* DEBUG_METHOD_NAME = "debug";
class MetricScope {
public:
MetricScope(Stub* stub, const char* routine) :
_stub(stub), _tt(butil::Timer::STARTED), _routine(routine) {
public:
MetricScope(Stub* stub, const char* routine)
: _stub(stub), _tt(butil::Timer::STARTED), _routine(routine) {
TRACEPRINTF("enter %s", routine);
}
......@@ -47,21 +49,20 @@ public:
_stub->update_latency(_tt.u_elapsed(), _routine.c_str());
}
private:
private:
Stub* _stub;
butil::Timer _tt;
std::string _routine;
};
class TracePackScope {
public:
TracePackScope(const char* routine) :
_routine(routine), _index(-1) {
public:
explicit TracePackScope(const char* routine) : _routine(routine), _index(-1) {
TRACEPRINTF("start pack: %s", routine);
}
TracePackScope(const char* routine, int index) :
_routine(routine), _index(index) {
TracePackScope(const char* routine, int index)
: _routine(routine), _index(index) {
TRACEPRINTF("start pack: %s, index: %d", routine, index);
}
......@@ -73,16 +74,16 @@ public:
}
}
private:
private:
std::string _routine;
int _index;
};
class TagFilter : public brpc::NamingServiceFilter {
public:
public:
class TagHelper {
public:
TagHelper(const std::string& kv_str) {
explicit TagHelper(const std::string& kv_str) {
if (kv_str.compare("") == 0) {
return;
}
......@@ -112,13 +113,12 @@ public:
std::string key = kv_pair_str.substr(0, kv_delim_pos);
std::string value = kv_pair_str.substr(kv_delim_pos + 1);
_kv_map.insert(std::pair<std::string, std::string>(key, value));
} while (end_pos != std::string::npos);
}
bool container(const std::string& k, const std::string& v) const {
std::map<std::string, std::string>::const_iterator found
= _kv_map.find(k);
std::map<std::string, std::string>::const_iterator found =
_kv_map.find(k);
if (found == _kv_map.end()) {
// key not found
return false;
......@@ -145,50 +145,43 @@ public:
return helper.container(_key, _value);
}
private:
private:
std::string _key;
std::string _value;
};
// Abstract adapter over bvar recorders so latency and average metrics can be
// stored uniformly in the stub's metric maps (see LatencyWrapper and
// AverageWrapper below).
class BvarWrapper {
 public:
  // Virtual destructor: instances are held through BvarWrapper* (e.g. in
  // _ltc_bvars / _avg_bvars), so deletion must dispatch to the derived type.
  virtual ~BvarWrapper() {}
  virtual void update_latency(int64_t acc) = 0;
  virtual void update_average(int64_t acc) = 0;
};
// BvarWrapper backed by a bvar::LatencyRecorder named "<name>_ltc".
// Only latency updates are meaningful for this recorder.
class LatencyWrapper : public BvarWrapper {
 public:
  explicit LatencyWrapper(const std::string& name) : _ltc(name + "_ltc") {}

  void update_latency(int64_t acc) { _ltc << acc; }

  // Averages are not supported here; log instead of silently dropping.
  void update_average(int64_t acc) {
    LOG(ERROR) << "Cannot update average to a LatencyRecorder";
  }

 private:
  bvar::LatencyRecorder _ltc;
};
// BvarWrapper backed by a windowed bvar::IntRecorder named "<name>_avg";
// the window length follows bvar's dump interval flag.
// Only average updates are meaningful for this recorder.
class AverageWrapper : public BvarWrapper {
 public:
  explicit AverageWrapper(const std::string& name)
      : _win(name + "_avg", &_avg, ::bvar::FLAGS_bvar_dump_interval) {}

  // Latencies are not supported here; log instead of silently dropping.
  void update_latency(int64_t acc) {
    LOG(ERROR) << "Cannot update latency to a AverageWrapper";
  }

  void update_average(int64_t acc) { _avg << acc; }

 private:
  bvar::IntRecorder _avg;
  bvar::Window<bvar::IntRecorder> _win;
};
......@@ -205,18 +198,24 @@ struct StubTLS {
std::vector<google::protobuf::Message*> response_pools;
};
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
class StubImpl : public Stub {
public:
public:
typedef google::protobuf::Message Message;
StubImpl()
: _channel(NULL), _pchannel(NULL), _gchannel(NULL),
_service_stub(NULL), _infer(NULL), _debug(NULL) {}
: _channel(NULL),
_pchannel(NULL),
_gchannel(NULL),
_service_stub(NULL),
_infer(NULL),
_debug(NULL) {}
~StubImpl() {}
int initialize(const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value);
int initialize(const VariantInfo& var,
const std::string& ep,
const std::string* tag,
const std::string* tag_value);
Predictor* fetch_predictor();
int return_predictor(Predictor* predictor);
......@@ -234,24 +233,22 @@ public:
int thrd_clear();
int thrd_finalize();
const std::string& which_endpoint() const {
return _endpoint;
}
const std::string& which_endpoint() const { return _endpoint; }
private:
private:
google::protobuf::RpcChannel* init_channel(
const VariantInfo& var,
brpc::NamingServiceFilter* filter = NULL);
const VariantInfo& var, brpc::NamingServiceFilter* filter = NULL);
brpc::ParallelChannel* init_pchannel(
brpc::Channel* sub_channel, uint32_t channel_count,
uint32_t package_size, const brpc::ChannelOptions& options);
brpc::ParallelChannel* init_pchannel(brpc::Channel* sub_channel,
uint32_t channel_count,
uint32_t package_size,
const brpc::ChannelOptions& options);
StubTLS* get_tls() {
return static_cast<StubTLS*>(bthread_getspecific(_bthread_key));
}
private:
private:
brpc::Channel* _channel;
brpc::ParallelChannel* _pchannel;
google::protobuf::RpcChannel* _gchannel;
......@@ -273,15 +270,14 @@ private:
mutable butil::Mutex _bvar_mutex;
#ifndef DECLARE_LATENCY
#define DECLARE_LATENCY(item) \
LatencyWrapper* _ltc_##item;
#define DECLARE_LATENCY(item) LatencyWrapper* _ltc_##item;
#endif
DECLARE_LATENCY(infer_sync); // 同步请求
DECLARE_LATENCY(infer_async); // 异步请求
DECLARE_LATENCY(infer_send); // 半同步send
DECLARE_LATENCY(infer_recv); // 半同步recv
DECLARE_LATENCY(infer_cancel);// 半同步cancel
DECLARE_LATENCY(infer_cancel); // 半同步cancel
DECLARE_LATENCY(debug); // 调试请求
DECLARE_LATENCY(rpc_init); // rpc reset
DECLARE_LATENCY(thrd_clear); // thrd clear
......@@ -291,8 +287,7 @@ private:
#undef DECLARE_LATENCY
#ifndef DECLARE_AVERAGE
#define DECLARE_AVERAGE(item) \
AverageWrapper* _avg_##item;
#define DECLARE_AVERAGE(item) AverageWrapper* _avg_##item;
#endif
DECLARE_AVERAGE(failure); // 失败请求数
......@@ -302,13 +297,13 @@ private:
#undef DECLARE_AVERAGE
public:
public:
void update_average(int64_t acc, const char* name) {
std::map<std::string, BvarWrapper*>::iterator iter =
_avg_bvars.find(AVG_PREFIX + name);
_avg_bvars.find(std::string(AVG_PREFIX) + name);
if (iter == _avg_bvars.end()) {
LOG(ERROR) << "Not found average record:avg_" << name;
return ;
return;
}
iter->second->update_average(acc);
......@@ -316,22 +311,18 @@ public:
void update_latency(int64_t acc, const char* name) {
std::map<std::string, BvarWrapper*>::iterator iter =
_ltc_bvars.find(LTC_PREFIX + name);
_ltc_bvars.find(std::string(LTC_PREFIX) + name);
if (iter == _ltc_bvars.end()) {
LOG(ERROR) << "Not found latency record:ltc_" << name;
return ;
return;
}
iter->second->update_latency(acc);
}
};
} // sdk_cpp
} // paddle_serving
} // baidu
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
#include "stub_impl.hpp"
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
#define BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
template<typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::initialize(
const VariantInfo& var, const std::string& ep,
const std::string* tag, const std::string* tag_value) {
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::initialize(const VariantInfo& var,
const std::string& ep,
const std::string* tag,
const std::string* tag_value) {
if (tag != NULL && tag_value != NULL) {
TagFilter* filter = new (std::nothrow) TagFilter(
*tag, *tag_value);
TagFilter* filter = new (std::nothrow) TagFilter(*tag, *tag_value);
if (!filter) {
LOG(FATAL) << "Failed create tag filter, key: " << tag
<< ", value: " << tag_value;
......@@ -19,9 +32,8 @@ int StubImpl<T, C, R, I, O>::initialize(
}
_gchannel = init_channel(var, filter);
LOG(INFO)
<< "Create stub with tag: " << *tag
<< ", " << *tag_value << ", ep: " << ep;
LOG(INFO) << "Create stub with tag: " << *tag << ", " << *tag_value
<< ", ep: " << ep;
} else {
_gchannel = init_channel(var, NULL);
LOG(INFO) << "Create stub without tag, ep " << ep;
......@@ -38,16 +50,15 @@ int StubImpl<T, C, R, I, O>::initialize(
return -1;
}
_infer = _service_stub->GetDescriptor()->FindMethodByName(
INFERENCE_METHOD_NAME);
_infer =
_service_stub->GetDescriptor()->FindMethodByName(INFERENCE_METHOD_NAME);
if (!_infer) {
LOG(FATAL) << "Failed get inference method, "
<< "method name: " << INFERENCE_METHOD_NAME;
return -1;
}
_debug = _service_stub->GetDescriptor()->FindMethodByName(
DEBUG_METHOD_NAME);
_debug = _service_stub->GetDescriptor()->FindMethodByName(DEBUG_METHOD_NAME);
if (!_debug) {
LOG(FATAL) << "Failed get debug method, "
<< "method name: " << DEBUG_METHOD_NAME;
......@@ -61,8 +72,9 @@ int StubImpl<T, C, R, I, O>::initialize(
return -1;
}
const std::string& name
= _endpoint + "_" + _service_stub->GetDescriptor()->full_name() + "_" + _tag;
const std::string& name = _endpoint + "_" +
_service_stub->GetDescriptor()->full_name() + "_" +
_tag;
_ltc_bvars.clear();
_avg_bvars.clear();
......@@ -71,14 +83,13 @@ int StubImpl<T, C, R, I, O>::initialize(
#ifndef DEFINE_LATENCY
#define DEFINE_LATENCY(item) \
do { \
_ltc_##item = new (std::nothrow) LatencyWrapper(name + "_"#item);\
_ltc_##item = new (std::nothrow) LatencyWrapper(name + "_" #item); \
if (!_ltc_##item) { \
LOG(FATAL) << "Failed create latency recorder:" \
<< name + "_"#item; \
LOG(FATAL) << "Failed create latency recorder:" << name + "_" #item; \
return -1; \
} \
_ltc_bvars["ltc_"#item] = _ltc_##item; \
} while(0)
_ltc_bvars["ltc_" #item] = _ltc_##item; \
} while (0)
#endif
DEFINE_LATENCY(infer_sync);
......@@ -97,14 +108,13 @@ int StubImpl<T, C, R, I, O>::initialize(
#ifndef DEFINE_AVERAGE
#define DEFINE_AVERAGE(item) \
do { \
_avg_##item = new(std::nothrow) AverageWrapper(name + "_"#item);\
_avg_##item = new (std::nothrow) AverageWrapper(name + "_" #item); \
if (!_avg_##item) { \
LOG(FATAL) << "Failed create average recorder:" \
<< name + "_"#item; \
LOG(FATAL) << "Failed create average recorder:" << name + "_" #item; \
return -1; \
} \
_avg_bvars["avg_"#item] = _avg_##item; \
} while(0)
_avg_bvars["avg_" #item] = _avg_##item; \
} while (0)
#endif
DEFINE_AVERAGE(failure);
......@@ -117,7 +127,7 @@ int StubImpl<T, C, R, I, O>::initialize(
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_initialize() {
if (bthread_getspecific(_bthread_key) != NULL) {
LOG(WARNING) << "Already thread initialized for stub";
......@@ -135,7 +145,7 @@ int StubImpl<T, C, R, I, O>::thrd_initialize() {
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_clear() {
MetricScope metric(this, "thrd_clear");
StubTLS* tls = get_tls();
......@@ -158,7 +168,7 @@ int StubImpl<T, C, R, I, O>::thrd_clear() {
// clear request
size_t is = tls->request_pools.size();
for (size_t ii = 0; ii < is; ++ii) {
if (return_request(tls->request_pools[ii])!= 0) {
if (return_request(tls->request_pools[ii]) != 0) {
LOG(FATAL) << "Failed return request: " << ii;
return -1;
}
......@@ -168,7 +178,7 @@ int StubImpl<T, C, R, I, O>::thrd_clear() {
// clear response
size_t os = tls->response_pools.size();
for (size_t oi = 0; oi < os; ++oi) {
if (return_response(tls->response_pools[oi])!= 0) {
if (return_response(tls->response_pools[oi]) != 0) {
LOG(FATAL) << "Failed return response: " << oi;
return -1;
}
......@@ -177,7 +187,7 @@ int StubImpl<T, C, R, I, O>::thrd_clear() {
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_finalize() {
StubTLS* tls = get_tls();
if (!tls || thrd_clear() != 0) {
......@@ -189,7 +199,7 @@ int StubImpl<T, C, R, I, O>::thrd_finalize() {
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
Predictor* StubImpl<T, C, R, I, O>::fetch_predictor() {
StubTLS* tls = get_tls();
if (!tls) {
......@@ -197,14 +207,15 @@ Predictor* StubImpl<T, C, R, I, O>::fetch_predictor() {
return NULL;
}
PredictorImpl<T>* predictor = butil::get_object<PredictorImpl<T> >();
PredictorImpl<T>* predictor = butil::get_object<PredictorImpl<T>>();
if (!predictor) {
LOG(FATAL) << "Failed fetch predictor";
return NULL;
}
if (predictor->init(_gchannel, _service_stub, _infer, _debug, _options,
this, _tag) != 0) {
if (predictor->init(
_gchannel, _service_stub, _infer, _debug, _options, this, _tag) !=
0) {
LOG(FATAL) << "Failed init fetched predictor";
return NULL;
}
......@@ -213,7 +224,7 @@ Predictor* StubImpl<T, C, R, I, O>::fetch_predictor() {
return predictor;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) {
if ((dynamic_cast<PredictorImpl<T>*>(predictor))->deinit() != 0) {
LOG(FATAL) << "Failed deinit fetched predictor";
......@@ -223,7 +234,7 @@ int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) {
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) const {
if ((dynamic_cast<PredictorImpl<T>*>(predictor))->deinit() != 0) {
LOG(FATAL) << "Failed deinit fetched predictor";
......@@ -233,7 +244,7 @@ int StubImpl<T, C, R, I, O>::return_predictor(Predictor* predictor) const {
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_request() {
StubTLS* tls = get_tls();
if (!tls) {
......@@ -252,7 +263,7 @@ google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_request() {
return req;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_request(
google::protobuf::Message* request) const {
request->Clear();
......@@ -260,7 +271,7 @@ int StubImpl<T, C, R, I, O>::return_request(
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_request(
google::protobuf::Message* request) {
request->Clear();
......@@ -268,7 +279,7 @@ int StubImpl<T, C, R, I, O>::return_request(
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_response() {
StubTLS* tls = get_tls();
if (!tls) {
......@@ -287,7 +298,7 @@ google::protobuf::Message* StubImpl<T, C, R, I, O>::fetch_response() {
return res;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_response(
google::protobuf::Message* response) const {
response->Clear();
......@@ -295,7 +306,7 @@ int StubImpl<T, C, R, I, O>::return_response(
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::return_response(
google::protobuf::Message* response) {
response->Clear();
......@@ -303,7 +314,7 @@ int StubImpl<T, C, R, I, O>::return_response(
return 0;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
google::protobuf::RpcChannel* StubImpl<T, C, R, I, O>::init_channel(
const VariantInfo& var, brpc::NamingServiceFilter* filter) {
brpc::ChannelOptions chn_options;
......@@ -322,15 +333,16 @@ google::protobuf::RpcChannel* StubImpl<T, C, R, I, O>::init_channel(
// connection
ASSIGN_CONF_ITEM(chn_options.max_retry, var.connection.cnt_retry_conn, NULL);
ASSIGN_CONF_ITEM(chn_options.connect_timeout_ms, var.connection.tmo_conn, NULL);
ASSIGN_CONF_ITEM(
chn_options.connect_timeout_ms, var.connection.tmo_conn, NULL);
ASSIGN_CONF_ITEM(chn_options.timeout_ms, var.connection.tmo_rpc, NULL);
ASSIGN_CONF_ITEM(chn_options.backup_request_ms, var.connection.tmo_hedge, NULL);
ASSIGN_CONF_ITEM(
chn_options.backup_request_ms, var.connection.tmo_hedge, NULL);
// connection type
std::string conn_type_str;
ASSIGN_CONF_ITEM(conn_type_str, var.connection.type_conn, NULL);
chn_options.connection_type
= brpc::StringToConnectionType(conn_type_str);
chn_options.connection_type = brpc::StringToConnectionType(conn_type_str);
// naminginfo
std::string cluster_naming_info;
......@@ -345,32 +357,29 @@ google::protobuf::RpcChannel* StubImpl<T, C, R, I, O>::init_channel(
return NULL;
}
if (_channel->Init(
cluster_naming_info.c_str(),
if (_channel->Init(cluster_naming_info.c_str(),
cluster_loadbalancer.c_str(),
&chn_options) != 0) {
LOG(ERROR)
<< "Failed to initialize channel, path: "
<< cluster_naming_info;
LOG(ERROR) << "Failed to initialize channel, path: " << cluster_naming_info;
return NULL;
}
// brpc parallel channel
_pchannel = init_pchannel(
_channel, _max_channel, _package_size, chn_options);
_pchannel = init_pchannel(_channel, _max_channel, _package_size, chn_options);
if (_pchannel) {
LOG(INFO) << "Succ create parallel channel, count: "
<< _max_channel;
LOG(INFO) << "Succ create parallel channel, count: " << _max_channel;
return _pchannel;
}
return _channel;
}
template<typename T, typename C, typename R, typename I, typename O>
template <typename T, typename C, typename R, typename I, typename O>
brpc::ParallelChannel* StubImpl<T, C, R, I, O>::init_pchannel(
brpc::Channel* sub_channel, uint32_t channel_count,
uint32_t package_size, const brpc::ChannelOptions& options) {
brpc::Channel* sub_channel,
uint32_t channel_count,
uint32_t package_size,
const brpc::ChannelOptions& options) {
if (channel_count <= 1) { // noneed use parallel channel
LOG(INFO) << "channel count <= 1, noneed use pchannel.";
return NULL;
......@@ -391,8 +400,7 @@ brpc::ParallelChannel* StubImpl<T, C, R, I, O>::init_pchannel(
}
for (uint32_t si = 0; si < channel_count; ++si) {
if (_pchannel->AddChannel(
sub_channel,
if (_pchannel->AddChannel(sub_channel,
brpc::DOESNT_OWN_CHANNEL,
new C(package_size, this),
new R(package_size, this)) != 0) {
......@@ -405,8 +413,6 @@ brpc::ParallelChannel* StubImpl<T, C, R, I, O>::init_pchannel(
return _pchannel;
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_STUB_IMPL_HPP
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file utils.h
* @author root(com@baidu.com)
* @date 2018/07/09 19:43:36
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
#define BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
#include "common.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "sdk-cpp/include/common.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
inline int str_split(
const std::string& source,
inline int str_split(const std::string& source,
const std::string& delim,
std::vector<std::string>* vector_spliter) {
int delim_length = delim.length();
int total_length = source.length();
int last = 0;
......@@ -40,32 +36,25 @@ inline int str_split(
if (delim_length == 1) {
size_t index = source.find_first_of(delim, last);
while (index != std::string::npos) {
vector_spliter->push_back(source.substr(last,
index - last));
vector_spliter->push_back(source.substr(last, index - last));
last = index + delim_length;
index = source.find_first_of(delim, last);
}
} else {
size_t index = source.find(delim, last);
while (index != std::string::npos) {
vector_spliter->push_back(source.substr(last,
index - last));
vector_spliter->push_back(source.substr(last, index - last));
last = index + delim_length;
index = source.find(delim, last);
}
}
if (last < total_length) {
vector_spliter->push_back(source.substr(last,
total_length - last));
vector_spliter->push_back(source.substr(last, total_length - last));
}
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_SDK_CPP_UTILS_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file include/variant.h
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/27 17:37:31
* @brief
*
**/
#ifndef BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
#define BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
#include "common.h"
#include "endpoint_config.h"
#include "stub.h"
#include "predictor.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include "sdk-cpp/include/common.h"
#include "sdk-cpp/include/endpoint_config.h"
#include "sdk-cpp/include/predictor.h"
#include "sdk-cpp/include/stub.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
class Variant {
friend class VariantRouterBase;
public:
friend class VariantRouterBase;
public:
virtual ~Variant() {}
Variant() : _default_stub(NULL) {
_stub_map.clear();
}
Variant() : _default_stub(NULL) { _stub_map.clear(); }
int initialize(
const EndpointInfo& ep_info,
const VariantInfo& var_info);
int initialize(const EndpointInfo& ep_info, const VariantInfo& var_info);
int thrd_initialize();
......@@ -46,18 +40,15 @@ public:
int thrd_finalize();
Predictor* get_predictor(
const void* params);
Predictor* get_predictor(const void* params);
Predictor* get_predictor();
int ret_predictor(Predictor* predictor);
const std::string& variant_tag() const {
return _variant_tag;
}
const std::string& variant_tag() const { return _variant_tag; }
private:
private:
std::string _endpoint_name;
std::string _stub_service;
......@@ -66,10 +57,6 @@ private:
Stub* _default_stub;
};
} // sdk_cpp
} // paddle_serving
} // baidu
#endif //BAIDU_PADDLE_SERVING_CPP_SDK_VARIANT_H
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DenseInstance { repeated float features = 1; };
message DensePrediction {
repeated float categories = 1;
};
message DensePrediction { repeated float categories = 1; };
// sparse format
message SparseInstance {
......@@ -17,9 +27,7 @@ message SparseInstance {
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
message SparsePrediction { repeated float categories = 1; };
// int64-tensor format
message Int64TensorInstance {
......@@ -38,9 +46,7 @@ message XImageReqInstance {
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
message XImageResInstance { required string response_json = 1; };
// x-record format
message XRecordInstance {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.fluid_engine;
......@@ -11,13 +25,9 @@ message DenseTensor {
required bytes features = 3;
};
message DenseInstance {
repeated DenseTensor tensors = 1;
};
message DenseInstance { repeated DenseTensor tensors = 1; };
message DenseRequest {
repeated DenseInstance instances = 1;
};
message DenseRequest { repeated DenseInstance instances = 1; };
// default sparse request
message SparseTensor {
......@@ -27,22 +37,14 @@ message SparseTensor {
required bytes features = 4;
};
message SparseInstance {
repeated SparseTensor tensors = 1;
};
message SparseInstance { repeated SparseTensor tensors = 1; };
message SparseRequest {
repeated SparseInstance instances = 1;
};
message SparseRequest { repeated SparseInstance instances = 1; };
// default response
message Prediction {
repeated float categories = 1;
};
message Prediction { repeated float categories = 1; };
message Response {
repeated Prediction predictions = 1;
};
message Response { repeated Prediction predictions = 1; };
service DefaultSparseService {
rpc inference(SparseRequest) returns (Response);
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.dense_service;
......@@ -10,7 +24,8 @@ message Request {
};
message Response {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions =
1;
};
service BuiltinDenseFormatService {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.echo_service;
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.image_classification;
......@@ -6,16 +20,19 @@ package baidu.paddle_serving.predictor.image_classification;
option cc_generic_services = true;
message ClassifyResponse {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions =
1;
};
message Request {
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances = 1;
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances =
1;
};
message Response {
// Each json string is serialized from ClassifyResponse predictions
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions = 1;
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions =
1;
};
service ImageClassifyService {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.int64tensor_service;
......@@ -6,8 +20,8 @@ package baidu.paddle_serving.predictor.int64tensor_service;
option cc_generic_services = true;
message Request {
repeated baidu.paddle_serving.predictor.format.Int64TensorInstance
instances = 1;
repeated baidu.paddle_serving.predictor.format.Int64TensorInstance instances =
1;
};
message Response {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
optional bool pack_on = 70000 [ default = false ];
};
extend google.protobuf.ServiceOptions {
......@@ -11,6 +25,6 @@ extend google.protobuf.ServiceOptions {
};
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
optional bool generate_impl = 1 [ default = false ];
optional bool generate_stub = 2 [ default = false ];
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.sparse_service;
......@@ -10,7 +24,8 @@ message Request {
};
message Response {
repeated baidu.paddle_serving.predictor.format.SparsePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.SparsePrediction predictions =
1;
};
service BuiltinSparseFormatService {
......
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file ../src/abtest.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:41:27
* @brief
*
**/
#include "abtest.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdk-cpp/include/abtest.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
int WeightedRandomRender::initialize(
const google::protobuf::Message& conf) {
int WeightedRandomRender::initialize(const google::protobuf::Message& conf) {
srand((unsigned)time(NULL));
try {
const configure::WeightedRandomRenderConf &weighted_random_render_conf =
const configure::WeightedRandomRenderConf& weighted_random_render_conf =
dynamic_cast<const configure::WeightedRandomRenderConf&>(conf);
std::string weights
= weighted_random_render_conf.variant_weight_list();
std::string weights = weighted_random_render_conf.variant_weight_list();
std::vector<std::string> splits;
if (str_split(weights, WEIGHT_SEPERATOR, &splits) != 0) {
LOG(ERROR) << "Failed split string:" <<
weights;
LOG(ERROR) << "Failed split string:" << weights;
return -1;
}
......@@ -40,11 +36,10 @@ int WeightedRandomRender::initialize(
_normalized_sum = 0;
for (uint32_t wi = 0; wi < weight_size; ++wi) {
char* end_pos = NULL;
uint32_t ratio = strtoul(
splits[wi].c_str(), &end_pos, 10);
uint32_t ratio = strtoul(splits[wi].c_str(), &end_pos, 10);
if (end_pos == splits[wi].c_str()) {
LOG(ERROR) << "Error ratio(uint32) format:"
<< splits[wi] << " at " << wi;
LOG(ERROR) << "Error ratio(uint32) format:" << splits[wi] << " at "
<< wi;
return -1;
}
......@@ -73,22 +68,20 @@ int WeightedRandomRender::initialize(
return 0;
}
Variant* WeightedRandomRender::route(
const VariantList& variants,
Variant* WeightedRandomRender::route(const VariantList& variants,
const void* params) {
return route(variants);
}
Variant* WeightedRandomRender::route(
const VariantList& variants) {
Variant* WeightedRandomRender::route(const VariantList& variants) {
if (variants.size() != _variant_weight_list.size()) {
LOG(ERROR) << "#(Weights) is not equal #(Stubs)"
<< ", size: " << _variant_weight_list.size()
<< " vs. " << variants.size();
<< ", size: " << _variant_weight_list.size() << " vs. "
<< variants.size();
return NULL;
}
uint32_t sample = rand() % _normalized_sum;
uint32_t sample = rand() % _normalized_sum; // NOLINT
uint32_t cand_size = _variant_weight_list.size();
uint32_t cur_total = 0;
for (uint32_t ci = 0; ci < cand_size; ++ci) {
......@@ -101,14 +94,12 @@ Variant* WeightedRandomRender::route(
}
}
LOG(ERROR) << "Errors accurs in sampling, sample:"
<< sample << ", total: " << _normalized_sum;
LOG(ERROR) << "Errors accurs in sampling, sample:" << sample
<< ", total: " << _normalized_sum;
return NULL;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint_config.cpp
* @author wanlijin01(com@baidu.com)
* @date 2018/07/09 15:30:09
* @brief
*
**/
#include "abtest.h"
#include "config_manager.h"
#include <brpc/server.h>
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdk-cpp/include/config_manager.h"
#include "brpc/server.h"
#include "sdk-cpp/include/abtest.h"
namespace baidu {
namespace paddle_serving {
......@@ -36,20 +37,17 @@ int EndpointConfigManager::create(const char* path, const char* file) {
int EndpointConfigManager::load() {
try {
SDKConf sdk_conf;
if (configure::read_proto_conf(
_endpoint_config_path.c_str(),
if (configure::read_proto_conf(_endpoint_config_path.c_str(),
_endpoint_config_file.c_str(),
&sdk_conf) != 0) {
LOG(ERROR)
<< "Failed initialize endpoint list"
<< ", config: " << _endpoint_config_path
<< "/" << _endpoint_config_file;
LOG(ERROR) << "Failed initialize endpoint list"
<< ", config: " << _endpoint_config_path << "/"
<< _endpoint_config_file;
return -1;
}
VariantInfo default_var;
if (init_one_variant(sdk_conf.default_variant_conf(),
default_var) != 0) {
if (init_one_variant(sdk_conf.default_variant_conf(), default_var) != 0) {
LOG(ERROR) << "Failed read default var conf";
return -1;
}
......@@ -60,10 +58,8 @@ int EndpointConfigManager::load() {
#endif
for (uint32_t ei = 0; ei < ep_size; ++ei) {
EndpointInfo ep;
if (init_one_endpoint(sdk_conf.predictors(ei), ep,
default_var) != 0) {
LOG(ERROR) << "Failed read endpoint info at: "
<< ei;
if (init_one_endpoint(sdk_conf.predictors(ei), ep, default_var) != 0) {
LOG(ERROR) << "Failed read endpoint info at: " << ei;
return -1;
}
......@@ -73,28 +69,24 @@ int EndpointConfigManager::load() {
<< ", ep name: " << ep.endpoint_name;
}
std::pair<std::map<
std::string, EndpointInfo>::iterator, bool> r
= _ep_map.insert(std::make_pair(ep.endpoint_name, ep));
std::pair<std::map<std::string, EndpointInfo>::iterator, bool> r =
_ep_map.insert(std::make_pair(ep.endpoint_name, ep));
if (!r.second) {
LOG(ERROR) << "Failed insert endpoint, name"
<< ep.endpoint_name;
LOG(ERROR) << "Failed insert endpoint, name" << ep.endpoint_name;
return -1;
}
}
} catch (std::exception& e) {
LOG(ERROR) << "Failed load configure" << e.what();
return -1;
}
LOG(INFO)
<< "Success reload endpoint config file, id: "
LOG(INFO) << "Success reload endpoint config file, id: "
<< _current_endpointmap_id;
return 0;
}
int EndpointConfigManager::init_one_endpoint(
const configure::Predictor& conf, EndpointInfo& ep,
int EndpointConfigManager::init_one_endpoint(const configure::Predictor& conf,
EndpointInfo& ep,
const VariantInfo& dft_var) {
#if 1
LOG(INFO) << "init_one_endpoint " << conf.name().c_str();
......@@ -113,11 +105,10 @@ int EndpointConfigManager::init_one_endpoint(
return -1;
}
EndpointRouterBase* router
= EndpointRouterFactory::instance().generate_object(
ep_router.value);
EndpointRouterBase* router =
EndpointRouterFactory::instance().generate_object(ep_router.value);
const configure::WeightedRandomRenderConf &router_conf =
const configure::WeightedRandomRenderConf& router_conf =
conf.weighted_random_render_conf();
if (!router || router->initialize(router_conf) != 0) {
LOG(ERROR) << "Failed fetch valid ab test strategy"
......@@ -134,10 +125,8 @@ int EndpointConfigManager::init_one_endpoint(
#endif
for (uint32_t vi = 0; vi < var_size; ++vi) {
VariantInfo var;
if (merge_variant(dft_var, conf.variants(vi),
var) != 0) {
LOG(ERROR) << "Failed merge variant info at: "
<< vi;
if (merge_variant(dft_var, conf.variants(vi), var) != 0) {
LOG(ERROR) << "Failed merge variant info at: " << vi;
return -1;
}
......@@ -150,10 +139,8 @@ int EndpointConfigManager::init_one_endpoint(
return -1;
}
LOG(INFO)
<< "Succ load one endpoint, name: " << ep.endpoint_name
LOG(INFO) << "Succ load one endpoint, name: " << ep.endpoint_name
<< ", count of variants: " << ep.vars.size() << ".";
} catch (std::exception& e) {
LOG(ERROR) << "Exception acccurs when load endpoint conf"
<< ", message: " << e.what();
......@@ -162,68 +149,57 @@ int EndpointConfigManager::init_one_endpoint(
return 0;
}
int EndpointConfigManager::init_one_variant(
const configure::VariantConf& conf, VariantInfo& var) {
int EndpointConfigManager::init_one_variant(const configure::VariantConf& conf,
VariantInfo& var) {
try {
// Connect
const configure::ConnectionConf& conn = conf.connection_conf();
PARSE_CONF_ITEM(conn, var.connection.tmo_conn,
connect_timeout_ms, -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_rpc,
rpc_timeout_ms, -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_hedge,
hedge_request_timeout_ms, -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_retry_conn,
connect_retry_count, -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_retry_hedge,
hedge_fetch_retry_count, -1);
PARSE_CONF_ITEM(conn, var.connection.cnt_maxconn_per_host,
max_connection_per_host, -1);
PARSE_CONF_ITEM(conn, var.connection.type_conn,
connection_type, -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_conn, connect_timeout_ms, -1);
PARSE_CONF_ITEM(conn, var.connection.tmo_rpc, rpc_timeout_ms, -1);
PARSE_CONF_ITEM(
conn, var.connection.tmo_hedge, hedge_request_timeout_ms, -1);
PARSE_CONF_ITEM(
conn, var.connection.cnt_retry_conn, connect_retry_count, -1);
PARSE_CONF_ITEM(
conn, var.connection.cnt_retry_hedge, hedge_fetch_retry_count, -1);
PARSE_CONF_ITEM(
conn, var.connection.cnt_maxconn_per_host, max_connection_per_host, -1);
PARSE_CONF_ITEM(conn, var.connection.type_conn, connection_type, -1);
// Naming
const configure::NamingConf& name = conf.naming_conf();
PARSE_CONF_ITEM(name, var.naminginfo.cluster_naming,
cluster, -1);
PARSE_CONF_ITEM(name, var.naminginfo.load_balancer,
load_balance_strategy, -1);
PARSE_CONF_ITEM(name, var.naminginfo.cluster_filter,
cluster_filter_strategy, -1);
PARSE_CONF_ITEM(name, var.naminginfo.cluster_naming, cluster, -1);
PARSE_CONF_ITEM(
name, var.naminginfo.load_balancer, load_balance_strategy, -1);
PARSE_CONF_ITEM(
name, var.naminginfo.cluster_filter, cluster_filter_strategy, -1);
// Rpc
const configure::RpcParameter& params = conf.rpc_parameter();
PARSE_CONF_ITEM(params, var.parameters.protocol,
protocol, -1);
PARSE_CONF_ITEM(params, var.parameters.protocol, protocol, -1);
#if 1
LOG(WARNING) << var.parameters.protocol.value.c_str();
#endif
PARSE_CONF_ITEM(params, var.parameters.compress_type,
compress_type, -1);
PARSE_CONF_ITEM(params, var.parameters.package_size,
package_size, -1);
PARSE_CONF_ITEM(params, var.parameters.max_channel,
max_channel_per_request, -1);
PARSE_CONF_ITEM(params, var.parameters.compress_type, compress_type, -1);
PARSE_CONF_ITEM(params, var.parameters.package_size, package_size, -1);
PARSE_CONF_ITEM(
params, var.parameters.max_channel, max_channel_per_request, -1);
// Split
const configure::SplitConf& splits = conf.split_conf();
PARSE_CONF_ITEM(splits, var.splitinfo.split_tag,
split_tag_name, -1);
PARSE_CONF_ITEM(splits, var.splitinfo.tag_cands_str,
tag_candidates, -1);
PARSE_CONF_ITEM(splits, var.splitinfo.split_tag, split_tag_name, -1);
PARSE_CONF_ITEM(splits, var.splitinfo.tag_cands_str, tag_candidates, -1);
if (parse_tag_values(var.splitinfo) != 0) {
LOG(ERROR) << "Failed parse tag_values:" <<
var.splitinfo.tag_cands_str.value;
LOG(ERROR) << "Failed parse tag_values:"
<< var.splitinfo.tag_cands_str.value;
return -1;
}
// tag
PARSE_CONF_ITEM(conf, var.parameters.route_tag,
tag, -1);
PARSE_CONF_ITEM(conf, var.parameters.route_tag, tag, -1);
} catch (...) {
LOG(ERROR) << "Failed load variant from configure unit";
return -1;
......@@ -232,8 +208,7 @@ int EndpointConfigManager::init_one_variant(
return 0;
}
int EndpointConfigManager::merge_variant(
const VariantInfo& default_var,
int EndpointConfigManager::merge_variant(const VariantInfo& default_var,
const configure::VariantConf& conf,
VariantInfo& merged_var) {
merged_var = default_var;
......@@ -244,9 +219,7 @@ int EndpointConfigManager::merge_variant(
return init_one_variant(conf, merged_var);
}
int EndpointConfigManager::parse_tag_values(
SplitParameters& split) {
int EndpointConfigManager::parse_tag_values(SplitParameters& split) {
split.tag_values.clear();
if (!split.split_tag.init || !split.tag_cands_str.init) {
LOG(WARNING) << "split info not set, skip...";
......@@ -264,8 +237,7 @@ int EndpointConfigManager::parse_tag_values(
if (end_pos == std::string::npos) {
tag_value_str = tag_str.substr(start_pos);
} else {
tag_value_str = tag_str.substr(
start_pos, end_pos - start_pos);
tag_value_str = tag_str.substr(start_pos, end_pos - start_pos);
start_pos = end_pos + 1;
}
......@@ -275,8 +247,8 @@ int EndpointConfigManager::parse_tag_values(
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file endpoint.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 14:10:44
* @brief
*
**/
#include "endpoint.h"
#include "factory.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdk-cpp/include/endpoint.h"
#include "sdk-cpp/include/factory.h"
namespace baidu {
namespace paddle_serving {
......@@ -36,8 +34,8 @@ int Endpoint::initialize(const EndpointInfo& ep_info) {
return -1;
}
_variant_list.push_back(var);
LOG(INFO) << "Succ create variant: " << vi << ", endpoint:"
<< _endpoint_name;
LOG(INFO) << "Succ create variant: " << vi
<< ", endpoint:" << _endpoint_name;
}
return 0;
......@@ -83,8 +81,7 @@ int Endpoint::thrd_finalize() {
}
// 带全流量分层实验路由信息
Predictor* Endpoint::get_predictor(
const void* params) {
Predictor* Endpoint::get_predictor(const void* params) {
Variant* var = NULL;
if (_variant_list.size() == 1) {
var = _variant_list[0];
......@@ -115,8 +112,7 @@ Predictor* Endpoint::get_predictor() {
int Endpoint::ret_predictor(Predictor* predictor) {
const Stub* stub = predictor->stub();
if (!stub || stub->return_predictor(
predictor) != 0) {
if (!stub || stub->return_predictor(predictor) != 0) {
LOG(ERROR) << "Failed return predictor to pool";
return -1;
}
......@@ -124,8 +120,6 @@ int Endpoint::ret_predictor(Predictor* predictor) {
return 0;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file src/predictor_api.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/09 17:36:13
* @brief
*
**/
#include "abtest.h"
#include "predictor_sdk.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdk-cpp/include/predictor_sdk.h"
#include "sdk-cpp/include/abtest.h"
namespace baidu {
namespace paddle_serving {
......@@ -36,36 +37,31 @@ int PredictorApi::create(const char* path, const char* file) {
}
if (_config_manager.create(path, file) != 0) {
LOG(ERROR) << "Failed create config manager from conf:"
<< path << "/" << file;
LOG(ERROR) << "Failed create config manager from conf:" << path << "/"
<< file;
return -1;
}
const std::map<std::string, EndpointInfo>& map
= _config_manager.config();
const std::map<std::string, EndpointInfo>& map = _config_manager.config();
std::map<std::string, EndpointInfo>::const_iterator it;
for (it = map.begin(); it != map.end(); ++it) {
const EndpointInfo& ep_info = it->second;
Endpoint* ep = new (std::nothrow) Endpoint();
if (ep->initialize(ep_info) != 0) {
LOG(ERROR) << "Failed intialize endpoint:"
<< ep_info.endpoint_name;
LOG(ERROR) << "Failed intialize endpoint:" << ep_info.endpoint_name;
return -1;
}
if (_endpoints.find(
ep_info.endpoint_name) != _endpoints.end()) {
if (_endpoints.find(ep_info.endpoint_name) != _endpoints.end()) {
LOG(ERROR) << "Cannot insert duplicated endpoint:"
<< ep_info.endpoint_name;
return -1;
}
std::pair<std::map<std::string, Endpoint*>::iterator, bool> r
= _endpoints.insert(std::make_pair(
ep_info.endpoint_name, ep));
std::pair<std::map<std::string, Endpoint*>::iterator, bool> r =
_endpoints.insert(std::make_pair(ep_info.endpoint_name, ep));
if (!r.second) {
LOG(ERROR) << "Failed insert endpoint:"
<< ep_info.endpoint_name;
LOG(ERROR) << "Failed insert endpoint:" << ep_info.endpoint_name;
return -1;
}
......@@ -81,13 +77,11 @@ int PredictorApi::thrd_initialize() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_initialize() != 0) {
LOG(ERROR) << "Failed thrd initialize endpoint:"
<< it->first;
LOG(ERROR) << "Failed thrd initialize endpoint:" << it->first;
return -1;
}
LOG(WARNING) << "Succ thrd initialize endpoint:"
<< it->first;
LOG(WARNING) << "Succ thrd initialize endpoint:" << it->first;
}
return 0;
}
......@@ -97,13 +91,11 @@ int PredictorApi::thrd_clear() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_clear() != 0) {
LOG(ERROR) << "Failed thrd clear endpoint:"
<< it->first;
LOG(ERROR) << "Failed thrd clear endpoint:" << it->first;
return -1;
}
LOG(INFO) << "Succ thrd clear endpoint:"
<< it->first;
LOG(INFO) << "Succ thrd clear endpoint:" << it->first;
}
return 0;
}
......@@ -113,24 +105,17 @@ int PredictorApi::thrd_finalize() {
for (it = _endpoints.begin(); it != _endpoints.end(); ++it) {
Endpoint* ep = it->second;
if (ep->thrd_finalize() != 0) {
LOG(ERROR) << "Failed thrd finalize endpoint:"
<< it->first;
LOG(ERROR) << "Failed thrd finalize endpoint:" << it->first;
return -1;
}
LOG(INFO) << "Succ thrd finalize endpoint:"
<< it->first;
LOG(INFO) << "Succ thrd finalize endpoint:" << it->first;
}
return 0;
}
void PredictorApi::destroy() {
// TODO
return ;
}
} // sdk_cpp
} // paddle_serving
} // baidu
void PredictorApi::destroy() { return; }
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
/***************************************************************************
*
* Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
*
**************************************************************************/
/**
* @file src/variant.cpp
* @author wanlijin01(wanlijin01@baidu.com)
* @date 2018/07/27 17:42:21
* @brief
*
**/
#include "variant.h"
#include "factory.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdk-cpp/include/variant.h"
#include "sdk-cpp/include/factory.h"
namespace baidu {
namespace paddle_serving {
......@@ -23,7 +21,6 @@ namespace sdk_cpp {
int Variant::initialize(const EndpointInfo& ep_info,
const VariantInfo& var_info) {
_endpoint_name = ep_info.endpoint_name;
_stub_service = ep_info.stub_service;
......@@ -33,11 +30,13 @@ int Variant::initialize(const EndpointInfo& ep_info,
const SplitParameters& split_info = var_info.splitinfo;
uint32_t tag_size = split_info.tag_values.size();
for (uint32_t ti = 0; ti < tag_size; ++ti) { // split
Stub* stub = StubFactory::instance().generate_object(
_stub_service);
Stub* stub = StubFactory::instance().generate_object(_stub_service);
const std::string& tag_value = split_info.tag_values[ti];
if (!stub || stub->initialize(var_info, ep_info.endpoint_name,
&split_info.split_tag.value, &tag_value) != 0) {
if (!stub ||
stub->initialize(var_info,
ep_info.endpoint_name,
&split_info.split_tag.value,
&tag_value) != 0) {
LOG(ERROR) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service
<< ", filter tag: " << tag_value;
......@@ -45,11 +44,9 @@ int Variant::initialize(const EndpointInfo& ep_info,
}
// 判重
std::map<std::string, Stub*>::iterator iter =
_stub_map.find(tag_value);
std::map<std::string, Stub*>::iterator iter = _stub_map.find(tag_value);
if (iter != _stub_map.end()) {
LOG(ERROR) << "duplicated tag value: "
<< tag_value;
LOG(ERROR) << "duplicated tag value: " << tag_value;
return -1;
}
_stub_map[tag_value] = stub;
......@@ -61,10 +58,8 @@ int Variant::initialize(const EndpointInfo& ep_info,
return 0;
}
Stub* stub = StubFactory::instance().generate_object(
ep_info.stub_service);
if (!stub || stub->initialize(
var_info, _endpoint_name, NULL, NULL) != 0) {
Stub* stub = StubFactory::instance().generate_object(ep_info.stub_service);
if (!stub || stub->initialize(var_info, _endpoint_name, NULL, NULL) != 0) {
LOG(ERROR) << "Failed init stub from factory"
<< ", stub name: " << ep_info.stub_service;
return -1;
......@@ -134,9 +129,7 @@ Predictor* Variant::get_predictor() {
return NULL;
}
Predictor* Variant::get_predictor(
const void* params) {
Predictor* Variant::get_predictor(const void* params) {
if (_default_stub) {
return _default_stub->fetch_predictor();
}
......@@ -144,8 +137,6 @@ Predictor* Variant::get_predictor(
return NULL;
}
} // sdk_cpp
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
......@@ -20,5 +20,3 @@ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/conf DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/serving/)
install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/data DESTINATION
${PADDLE_SERVING_INSTALL_DIR}/demo/serving/)
#include "op/reader_op.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/classify_op.h"
#include "framework/memory.h"
#include "framework/infer.h"
#include "framework/memory.h"
#include "op/reader_op.h"
namespace baidu {
namespace paddle_serving {
......@@ -36,8 +50,7 @@ int ClassifyOp::inference() {
// call paddle fluid model for inferencing
if (InferManager::instance().infer(
IMAGE_CLASSIFICATION_MODEL_NAME, in,
out, sample_size)) {
IMAGE_CLASSIFICATION_MODEL_NAME, in, out, sample_size)) {
LOG(ERROR) << "Failed do infer in fluid model: "
<< IMAGE_CLASSIFICATION_MODEL_NAME;
return -1;
......@@ -68,7 +81,7 @@ int ClassifyOp::inference() {
// assign output data
uint32_t data_size = out_tensor.data.length() / sizeof(float);
float* data = (float*)out_tensor.data.data();
float* data = reinterpret_cast<float*>(out_tensor.data.data());
for (uint32_t di = 0; di < data_size; ++di) {
ins->add_categories(data[di]);
}
......@@ -91,6 +104,6 @@ int ClassifyOp::inference() {
DEFINE_OP(ClassifyOp);
} // serving
} // paddle_serving
} // baidu
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_IMAGE_CLASSIFY_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_IMAGE_CLASSIFY_OP_H
#include "builtin_format.pb.h"
#include "image_class.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle/fluid/inference/paddle_inference_api.h"
#include "serving/image_class.pb.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
static const char* IMAGE_CLASSIFICATION_MODEL_NAME
= "image_classification_resnet";
static const char* IMAGE_CLASSIFICATION_MODEL_NAME =
"image_classification_resnet";
class ClassifyOp : public baidu::paddle_serving::predictor::OpWithChannel<
baidu::paddle_serving::predictor::image_classification::ClassifyResponse> {
public:
baidu::paddle_serving::predictor::image_classification::
ClassifyResponse> {
public:
typedef std::vector<paddle::PaddleTensor> TensorVector;
DECLARE_OP(ClassifyOp);
......@@ -25,8 +35,6 @@ public:
int inference();
};
} // serving
} // paddle_serving
} // baidu
#endif
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/common_echo_op.h"
namespace baidu {
......@@ -6,6 +20,6 @@ namespace predictor {
DEFINE_OP(CommonEchoOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PREDICTOR_PREDICTOR_COMMON_ECHO_OP_H
#define BAIDU_PREDICTOR_PREDICTOR_COMMON_ECHO_OP_H
#include "echo_service.pb.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "serving/echo_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class CommonEchoOp : public OpWithChannel<
class CommonEchoOp
: public OpWithChannel<
baidu::paddle_serving::predictor::echo_service::RequestAndResponse> {
public:
public:
typedef baidu::paddle_serving::predictor::echo_service::RequestAndResponse
RequestAndResponse;
DECLARE_OP(CommonEchoOp);
int inference() {
const RequestAndResponse* req = dynamic_cast<const RequestAndResponse*>(
get_request_message());
const RequestAndResponse* req =
dynamic_cast<const RequestAndResponse*>(get_request_message());
RequestAndResponse* data = mutable_data<RequestAndResponse>();
......@@ -33,8 +45,6 @@ public:
}
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/dense_echo_op.h"
namespace baidu {
......@@ -9,15 +23,12 @@ using baidu::paddle_serving::predictor::dense_service::Request;
using baidu::paddle_serving::predictor::dense_service::Response;
int DenseEchoOp::inference() {
const Request* req =
dynamic_cast<const Request*>(get_request_message());
const Request* req = dynamic_cast<const Request*>(get_request_message());
Response* res = mutable_data<Response>();
LOG(INFO) << "Receive request in dense service:"
<< req->ShortDebugString();
LOG(INFO) << "Receive request in dense service:" << req->ShortDebugString();
uint32_t sample_size = req->instances_size();
for (uint32_t si = 0; si < sample_size; si++) {
DensePrediction* dense_res =
res->mutable_predictions()->Add();
DensePrediction* dense_res = res->mutable_predictions()->Add();
dense_res->add_categories(100.0 + si * 0.1);
dense_res->add_categories(200.0 + si * 0.1);
}
......@@ -26,6 +37,6 @@ int DenseEchoOp::inference() {
DEFINE_OP(DenseEchoOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#define BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#include "dense_service.pb.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "serving/dense_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class DenseEchoOp : public OpWithChannel<
class DenseEchoOp
: public OpWithChannel<
baidu::paddle_serving::predictor::dense_service::Response> {
public:
public:
DECLARE_OP(DenseEchoOp);
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/int64tensor_echo_op.h"
namespace baidu {
......@@ -9,11 +23,9 @@ using baidu::paddle_serving::predictor::int64tensor_service::Request;
using baidu::paddle_serving::predictor::int64tensor_service::Response;
int Int64TensorEchoOp::inference() {
const Request* req =
dynamic_cast<const Request*>(get_request_message());
const Request* req = dynamic_cast<const Request*>(get_request_message());
Response* res = mutable_data<Response>();
LOG(INFO) << "Receive request in dense service:"
<< req->ShortDebugString();
LOG(INFO) << "Receive request in dense service:" << req->ShortDebugString();
uint32_t sample_size = req->instances_size();
for (uint32_t si = 0; si < sample_size; si++) {
Float32TensorPredictor* float32_tensor_res =
......@@ -28,6 +40,6 @@ int Int64TensorEchoOp::inference() {
DEFINE_OP(Int64TensorEchoOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#define BAIDU_PADDLE_SSERVER_PREDICTOR_OP_DENSE_ECHO_OP_H
#include "int64tensor_service.pb.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "serving/int64tensor_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class Int64TensorEchoOp : public OpWithChannel<
class Int64TensorEchoOp
: public OpWithChannel<
baidu::paddle_serving::predictor::int64tensor_service::Response> {
public:
public:
DECLARE_OP(Int64TensorEchoOp);
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/reader_op.h"
#include <algorithm>
#include "framework/memory.h"
namespace baidu {
......@@ -10,10 +25,8 @@ using baidu::paddle_serving::predictor::format::XImageReqInstance;
using baidu::paddle_serving::predictor::image_classification::Request;
int ReaderOp::inference() {
const Request* req =
dynamic_cast<const Request*>(get_request_message());
LOG(INFO) << "Receive request in dense service:"
<< req->ShortDebugString();
const Request* req = dynamic_cast<const Request*>(get_request_message());
LOG(INFO) << "Receive request in dense service:" << req->ShortDebugString();
ReaderOutput* res = mutable_data<ReaderOutput>();
if (!res) {
......@@ -28,10 +41,9 @@ int ReaderOp::inference() {
return -1;
}
// TODO pmeans/scales/isize/enable_crop should be configurable.
// TODO(xxx) pmeans/scales/isize/enable_crop should be configurable.
float pmean[3] = {0.485 * 255, 0.456 * 255, 0.406 * 255};
float scale[3] = { 1 / (0.229 * 255), 1 / (0.224 * 255), \
1 / (0.225 * 255)};
float scale[3] = {1 / (0.229 * 255), 1 / (0.224 * 255), 1 / (0.225 * 255)};
size_t iresize[] = {244, 244}; // row, column
bool enable_crop = true;
......@@ -53,7 +65,7 @@ int ReaderOp::inference() {
_image_vec_tmp.clear();
_image_vec_tmp.assign(binary, binary + length);
_image_8u_tmp = cv::imdecode(cv::Mat(_image_vec_tmp),
CV_LOAD_IMAGE_COLOR/*1*/); // in B/G/R order.
CV_LOAD_IMAGE_COLOR /*1*/); // in B/G/R order.
if (_image_8u_tmp.data == NULL) {
LOG(ERROR) << "Image decode failed!";
return -1;
......@@ -65,27 +77,24 @@ int ReaderOp::inference() {
const int CC = _image_8u_tmp.channels();
// resize/crop
if (_image_8u_tmp.cols != resize.width
|| _image_8u_tmp.rows != resize.height) {
int short_egde = std::min<int>(
_image_8u_tmp.cols, _image_8u_tmp.rows);
int yy = int((_image_8u_tmp.rows - short_egde) / 2);
int xx = int((_image_8u_tmp.cols - short_egde) / 2);
_image_8u_tmp = cv::Mat(_image_8u_tmp,
cv::Rect(xx, yy, short_egde, short_egde));
if (_image_8u_tmp.cols != resize.width
|| _image_8u_tmp.rows != resize.height) {
if (_image_8u_tmp.cols != resize.width ||
_image_8u_tmp.rows != resize.height) {
int short_egde = std::min<int>(_image_8u_tmp.cols, _image_8u_tmp.rows);
int yy = static_cast<int>((_image_8u_tmp.rows - short_egde) / 2);
int xx = static_cast<int>((_image_8u_tmp.cols - short_egde) / 2);
_image_8u_tmp =
cv::Mat(_image_8u_tmp, cv::Rect(xx, yy, short_egde, short_egde));
if (_image_8u_tmp.cols != resize.width ||
_image_8u_tmp.rows != resize.height) {
cv::Mat resize_image;
cv::resize(_image_8u_tmp, resize_image, resize);
_image_8u_tmp = resize_image;
}
LOG(INFO) << "Succ crop one image[CHW="
<< _image_8u_tmp.channels() << ", "
<< _image_8u_tmp.cols << ", "
<< _image_8u_tmp.rows << "]"
<< " from image[CHW=" << CC << ", "
<< HH << ", " << WW << "]";
LOG(INFO) << "Succ crop one image[CHW=" << _image_8u_tmp.channels()
<< ", " << _image_8u_tmp.cols << ", " << _image_8u_tmp.rows
<< "]"
<< " from image[CHW=" << CC << ", " << HH << ", " << WW << "]";
}
// BGR->RGB transformer
......@@ -109,12 +118,13 @@ int ReaderOp::inference() {
in_tensor.shape.push_back(W);
in_tensor.shape.push_back(H);
LOG(INFO) << "Succ read one image, C: " << C
<< ", W: " << W << ", H: " << H;
LOG(INFO) << "Succ read one image, C: " << C << ", W: " << W
<< ", H: " << H;
// tls resource assignment
size_t len = dense_capacity * sizeof(float);
float* data = (float*) MempoolWrapper::instance().malloc(len);
float* data =
reinterpret_cast<float*>(MempoolWrapper::instance().malloc(len));
if (data == NULL) {
LOG(ERROR) << "Failed create temp float array, "
<< "size=" << dense_capacity;
......@@ -123,12 +133,11 @@ int ReaderOp::inference() {
for (int h = 0; h < H; h++) {
// p points to a new line
unsigned char* p = _image_8u_rgb.ptr < unsigned char>(h);
unsigned char* p = _image_8u_rgb.ptr<unsigned char>(h);
for (int w = 0; w < W; w++) {
for (int c = 0; c < C; c++) {
// HWC(row,column,channel) -> CWH
data[W * H * c + W * h + w] =
(p[C * w + c] - pmean[c]) * scale[c];
data[W * H * c + W * h + w] = (p[C * w + c] - pmean[c]) * scale[c];
}
}
}
......@@ -144,6 +153,6 @@ int ReaderOp::inference() {
DEFINE_OP(ReaderOp);
} // serving
} // paddle_serving
} // baidu
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_SERVING_OP_READER_OP_H
#define BAIDU_PADDLE_SERVING_SERVING_OP_READER_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "builtin_format.pb.h"
#include "image_class.pb.h"
#pragma once
#include <string>
#include <vector>
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
#include "predictor/builtin_format.pb.h"
#include "serving/image_class.pb.h"
// opencv
#include "opencv/cv.h"
#include "opencv/highgui.h"
#include "opencv/cxcore.h"
#include "opencv/cv.hpp"
#include "opencv/cxcore.h"
#include "opencv/highgui.h"
#include "paddle/fluid/inference/paddle_inference_api.h"
......@@ -31,28 +45,24 @@ struct ReaderOutput {
tensors.clear();
}
std::string ShortDebugString() const {
return "Not implemented!";
}
std::string ShortDebugString() const { return "Not implemented!"; }
};
class ReaderOp : public baidu::paddle_serving::predictor::OpWithChannel<
ReaderOutput> {
public:
class ReaderOp
: public baidu::paddle_serving::predictor::OpWithChannel<ReaderOutput> {
public:
typedef std::vector<paddle::PaddleTensor> TensorVector;
DECLARE_OP(ReaderOp);
int inference();
private:
private:
cv::Mat _image_8u_tmp;
cv::Mat _image_8u_rgb;
std::vector<char> _image_vec_tmp;
};
} // serving
} // paddle_serving
} // baidu
#endif
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "op/sparse_echo_op.h"
namespace baidu {
......@@ -7,8 +21,7 @@ namespace predictor {
int SparseEchoOp::inference() {
// Every op can obtain request message by:
// get_request_message()
const Request* req =
dynamic_cast<const Request*>(get_request_message());
const Request* req = dynamic_cast<const Request*>(get_request_message());
// Each op can obtain self-writable-data by:
// mutable_data()
......@@ -18,13 +31,10 @@ int SparseEchoOp::inference() {
// get/mutable_depend_argment()
// ...
LOG(INFO)
<< "Receive request in sparse service:"
<< req->ShortDebugString();
LOG(INFO) << "Receive request in sparse service:" << req->ShortDebugString();
uint32_t sample_size = req->instances_size();
for (uint32_t si = 0; si < sample_size; si++) {
SparsePrediction* sparse_res =
res->mutable_predictions()->Add();
SparsePrediction* sparse_res = res->mutable_predictions()->Add();
sparse_res->add_categories(100.0 + si * 0.1);
sparse_res->add_categories(200.0 + si * 0.1);
}
......@@ -33,6 +43,6 @@ int SparseEchoOp::inference() {
DEFINE_OP(SparseEchoOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_SPARSE_ECHO_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_SPARSE_ECHO_OP_H
#include "sparse_service.pb.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "serving/sparse_service.pb.h"
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class SparseEchoOp : public OpWithChannel<
class SparseEchoOp
: public OpWithChannel<
baidu::paddle_serving::predictor::sparse_service::Response> {
public:
public:
DECLARE_OP(SparseEchoOp);
typedef baidu::paddle_serving::predictor::sparse_service::Request Request;
......@@ -26,8 +38,6 @@ public:
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "json2pb/pb_to_json.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <google/protobuf/text_format.h>
#include <string>
#include "json2pb/pb_to_json.h"
#include "op/write_json_op.h"
#include "framework/memory.h"
#include "op/write_json_op.h"
namespace baidu {
namespace paddle_serving {
......@@ -38,8 +53,8 @@ int WriteJsonOp::inference() {
return -1;
}
std::string* text = ins->mutable_response_json();
if (!json2pb::ProtoMessageToJson(classify_out->predictions(si),
text, &err_string)) {
if (!json2pb::ProtoMessageToJson(
classify_out->predictions(si), text, &err_string)) {
LOG(ERROR) << "Failed convert message["
<< classify_out->predictions(si).ShortDebugString()
<< "], err: " << err_string;
......@@ -47,14 +62,13 @@ int WriteJsonOp::inference() {
}
}
LOG(INFO) << "Succ write json:"
<< classify_out->ShortDebugString();
LOG(INFO) << "Succ write json:" << classify_out->ShortDebugString();
return 0;
}
DEFINE_OP(WriteJsonOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_JSON_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_JSON_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "builtin_format.pb.h"
#include "image_class.pb.h"
#pragma once
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
#include "serving/image_class.pb.h"
namespace baidu {
namespace paddle_serving {
namespace predictor {
class WriteJsonOp : public OpWithChannel<
class WriteJsonOp
: public OpWithChannel<
baidu::paddle_serving::predictor::image_classification::Response> {
public:
public:
DECLARE_OP(WriteJsonOp);
int inference();
};
} // predictor
} // paddle_serving
} // baidu
#endif
} // namespace predictor
} // namespace paddle_serving
} // namespace baidu
#include "json2pb/pb_to_json.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <google/protobuf/text_format.h>
#include <string>
#include "json2pb/pb_to_json.h"
#include "op/write_op.h"
#include "framework/memory.h"
#include "op/write_op.h"
namespace baidu {
namespace paddle_serving {
......@@ -38,8 +53,8 @@ int WriteOp::inference() {
return -1;
}
std::string* text = ins->mutable_response_json();
if (!json2pb::ProtoMessageToJson(classify_out->predictions(si),
text, &err_string)) {
if (!json2pb::ProtoMessageToJson(
classify_out->predictions(si), text, &err_string)) {
LOG(ERROR) << "Failed convert message["
<< classify_out->predictions(si).ShortDebugString()
<< "], err: " << err_string;
......@@ -47,14 +62,13 @@ int WriteOp::inference() {
}
}
LOG(INFO) << "Succ write json:"
<< classify_out->ShortDebugString();
LOG(INFO) << "Succ write json:" << classify_out->ShortDebugString();
return 0;
}
DEFINE_OP(WriteOp);
} // predictor
} // paddle_serving
} // baidu
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
#ifndef BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_OP_H
#define BAIDU_PADDLE_SERVING_PREDICTOR_OP_WRITE_OP_H
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "builtin_format.pb.h"
#include "image_class.pb.h"
#pragma once
#include "common/inner_common.h"
#include "op/op.h"
#include "framework/channel.h"
#include "framework/op_repository.h"
#include "op/op.h"
#include "predictor/builtin_format.pb.h"
#include "serving/image_class.pb.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
class WriteOp : public baidu::paddle_serving::predictor::OpWithChannel<
class WriteOp
: public baidu::paddle_serving::predictor::OpWithChannel<
baidu::paddle_serving::predictor::image_classification::Response> {
public:
public:
DECLARE_OP(WriteOp);
int inference();
};
} // serving
} // paddle_serving
} // baidu
#endif
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.format;
// dense format
message DenseInstance {
repeated float features = 1;
};
message DenseInstance { repeated float features = 1; };
message DensePrediction {
repeated float categories = 1;
};
message DensePrediction { repeated float categories = 1; };
// sparse format
message SparseInstance {
......@@ -18,9 +28,7 @@ message SparseInstance {
repeated float values = 3;
};
message SparsePrediction {
repeated float categories = 1;
};
message SparsePrediction { repeated float categories = 1; };
// int64-tensor format
message Int64TensorInstance {
......@@ -39,9 +47,7 @@ message XImageReqInstance {
required uint32 image_length = 2;
};
message XImageResInstance {
required string response_json = 1;
};
message XImageResInstance { required string response_json = 1; };
// x-record format
message XRecordInstance {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.dense_service;
......@@ -10,7 +24,8 @@ message Request {
};
message Response {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions =
1;
};
service BuiltinDenseFormatService {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.echo_service;
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.image_classification;
......@@ -6,16 +20,19 @@ package baidu.paddle_serving.predictor.image_classification;
option cc_generic_services = true;
message ClassifyResponse {
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.DensePrediction predictions =
1;
};
message Request {
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances = 1;
repeated baidu.paddle_serving.predictor.format.XImageReqInstance instances =
1;
};
message Response {
// Each json string is serialized from ClassifyResponse predictions
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions = 1;
repeated baidu.paddle_serving.predictor.format.XImageResInstance predictions =
1;
};
service ImageClassifyService {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.int64tensor_service;
......@@ -6,8 +20,8 @@ package baidu.paddle_serving.predictor.int64tensor_service;
option cc_generic_services = true;
message Request {
repeated baidu.paddle_serving.predictor.format.Int64TensorInstance
instances = 1;
repeated baidu.paddle_serving.predictor.format.Int64TensorInstance instances =
1;
};
message Response {
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
package baidu.paddle_serving.predictor.native_tensor;
......@@ -25,13 +39,9 @@ message DenseTensor {
repeated uint64 uint64_data = 9;
};
message DenseRequest {
repeated DenseTensor tensors = 1;
};
message DenseRequest { repeated DenseTensor tensors = 1; };
message DenseResponse {
repeated DenseTensor tensors = 1;
};
message DenseResponse { repeated DenseTensor tensors = 1; };
service BuiltinDenseFormatService {
rpc inference(DenseRequest) returns (DenseResponse);
......@@ -52,13 +62,9 @@ message SparseTensor {
repeated uint64 uint64_data = 10;
};
message SparseRequest {
repeated SparseTensor tensors = 1;
};
message SparseRequest { repeated SparseTensor tensors = 1; };
message SparseResponse {
repeated SparseTensor tensors = 1;
};
message SparseResponse { repeated SparseTensor tensors = 1; };
service BuiltinSparseFormatService {
rpc inference(SparseRequest) returns (SparseResponse);
......
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "google/protobuf/descriptor.proto";
package pds;
extend google.protobuf.FieldOptions {
optional bool pack_on = 70000 [default=false];
optional bool pack_on = 70000 [ default = false ];
};
extend google.protobuf.ServiceOptions {
......@@ -11,6 +25,6 @@ extend google.protobuf.ServiceOptions {
};
message PaddleServiceOption {
optional bool generate_impl = 1 [default = false];
optional bool generate_stub = 2 [default = false];
optional bool generate_impl = 1 [ default = false ];
optional bool generate_stub = 2 [ default = false ];
};
syntax="proto2";
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
import "pds_option.proto";
import "builtin_format.proto";
package baidu.paddle_serving.predictor.sparse_service;
......@@ -10,7 +24,8 @@ message Request {
};
message Response {
repeated baidu.paddle_serving.predictor.format.SparsePrediction predictions = 1;
repeated baidu.paddle_serving.predictor.format.SparsePrediction predictions =
1;
};
service BuiltinSparseFormatService {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册