提交 d457e5a5 编写于 作者: W wangguibao

20190222

Change-Id: Iadcfd4a3c92f2b91dd6fbcb5fa81f01f605c9ec2
上级 2ed3094a
......@@ -101,6 +101,7 @@ add_subdirectory(bsl)
add_subdirectory(ullib)
add_subdirectory(spreg)
add_subdirectory(configure)
add_subdirectory(proto_configure)
add_subdirectory(mempool)
add_subdirectory(predictor)
add_subdirectory(inferencer-fluid-cpu)
......
......@@ -17,49 +17,59 @@ set_source_files_properties(
PROPERTIES
COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
add_dependencies(pdserving protobuf boost brpc leveldb bsl pdcodegen configure
proto_configure
ullib spreg mempool)
target_include_directories(pdserving PUBLIC
${CMAKE_CURRENT_LIST_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
${CMAKE_CURRENT_LIST_DIR}/../mempool
${CMAKE_CURRENT_LIST_DIR}/../spreg
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
target_link_libraries(pdserving brpc protobuf boost leveldb bsl
configure ullib spreg mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
target_link_libraries(pdserving
brpc protobuf boost leveldb bsl configure proto_configure ullib spreg
mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
add_executable(pdserving_exe ${pdserving_srcs})
set_source_files_properties(
${pdserving_srcs}
PROPERTIES
COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
add_dependencies(pdserving_exe protobuf boost brpc leveldb bsl pdcodegen
configure ullib spreg mempool)
add_dependencies(pdserving_exe
protobuf boost brpc leveldb bsl pdcodegen configure proto_configure
ullib spreg mempool)
target_include_directories(pdserving_exe PUBLIC
${CMAKE_CURRENT_LIST_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
${CMAKE_CURRENT_LIST_DIR}/../mempool
${CMAKE_CURRENT_LIST_DIR}/../spreg
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
target_link_libraries(pdserving_exe brpc protobuf leveldb bsl configure ullib
spreg mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
target_link_libraries(pdserving_exe brpc protobuf leveldb bsl configure
proto_configure ullib spreg mempool -lpthread -lcrypto -lm -lrt -lssl
-ldl -lz)
add_library(pdclient ${pdclient_srcs})
set_source_files_properties(
${pdclient_srcs}
PROPERTIES
COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
add_dependencies(pdclient protobuf boost brpc pdcodegen)
add_dependencies(pdclient protobuf boost brpc pdcodegen proto_configure)
target_include_directories(pdclient PUBLIC
${CMAKE_CURRENT_LIST_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/
${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
${CMAKE_CURRENT_LIST_DIR}/../configure
${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
${CMAKE_CURRENT_LIST_DIR}/../mempool
${CMAKE_CURRENT_LIST_DIR}/../spreg
${CMAKE_CURRENT_LIST_DIR}/../ullib/include
......
......@@ -9,24 +9,18 @@ DEFINE_int32(el_log_level, 16, "");
DEFINE_int32(idle_timeout_s, 16, "");
DEFINE_int32(port, 8010, "");
DEFINE_string(workflow_path, "./conf", "");
DEFINE_string(workflow_file, "workflow.conf", "");
DEFINE_string(workflow_file, "workflow.prototxt", "");
DEFINE_string(inferservice_path, "./conf", "");
DEFINE_string(inferservice_file, "service.conf", "");
DEFINE_string(inferservice_file, "service.prototxt", "");
DEFINE_string(logger_path, "./conf", "");
DEFINE_string(logger_file, "log.conf", "");
DEFINE_string(resource_path, "./conf", "");
DEFINE_string(resource_file, "resource.conf", "");
DEFINE_bool(enable_yacl, false, "enable yacl");
DEFINE_string(yacl_module_name, "predictor", "yacl module name");
DEFINE_string(yacl_param_dump_file, "./data/yacl_param_list.txt", "yacl param dump file path");
DEFINE_bool(enable_mc_cache, false, "enable mc cache");
DEFINE_bool(enable_nshead_protocol, false, "enable nshead protocol in server side");
DEFINE_string(nshead_protocol, "itp", "type of nshead protocol, support itp, nova_pbrpc, public_pbrpc, nshead_mcpack");
DEFINE_string(resource_file, "resource.prototxt", "");
DEFINE_int32(max_concurrency, 0, "Limit of request processing in parallel, 0: unlimited");
DEFINE_int32(num_threads, 0, "Number of pthreads that server runs on, not change if this value <= 0");
DEFINE_int32(reload_interval_s, 10, "");
DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
DEFINE_string(enable_protocol_list, "baidu_std nshead", "set protocol list");
DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list");
const char* START_OP_NAME = "startup_op";
} // predictor
......
......@@ -26,7 +26,9 @@
#include <error.h>
#include "Configure.h"
// #include <comlog/comlog.h>
#include "configure.pb.h"
#include "configure_parser.h"
#include "common/utils.h"
#include "common/types.h"
......
......@@ -13,12 +13,14 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
using configure::ModelToolkitConf;
class InferEngine {
public:
virtual ~InferEngine() {}
virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
return proc_initialize_impl(conf, version);
}
virtual int proc_finalize() {
......@@ -43,7 +45,7 @@ public:
// begin: framework inner call
virtual int proc_initialize_impl(
const comcfg::ConfigUnit& conf, bool version) = 0;
const configure::EngineDesc& conf, bool version) = 0;
virtual int thrd_initialize_impl() = 0;
virtual int thrd_finalize_impl() = 0;
virtual int thrd_clear_impl() = 0;
......@@ -68,13 +70,13 @@ public:
virtual int load(const std::string& data_path) = 0;
int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool version) {
_reload_tag_file = conf["ReloadableMeta"].to_cstr();
_reload_mode_tag = conf["ReloadableType"].to_cstr();
_model_data_path = conf["ModelDataPath"].to_cstr();
_infer_thread_num = conf["RuntimeThreadNum"].to_uint32();
_infer_batch_size = conf["BatchInferSize"].to_uint32();
_infer_batch_align = conf["EnableBatchAlign"].to_uint32();
int proc_initialize_impl(const configure::EngineDesc& conf, bool version) {
_reload_tag_file = conf.reloadable_meta();
_reload_mode_tag = conf.reloadable_type();
_model_data_path = conf.model_data_path();
_infer_thread_num = conf.runtime_thread_num();
_infer_batch_size = conf.batch_infer_size();
_infer_batch_align = conf.enable_batch_align();
if (!check_need_reload() || load(_model_data_path) != 0) {
LOG(FATAL) << "Failed load model_data_path" << _model_data_path;
return -1;
......@@ -89,7 +91,7 @@ public:
return 0;
}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
int proc_initialize(const configure::EngineDesc& conf, bool version) {
if (proc_initialize_impl(conf, version) != 0) {
LOG(FATAL) << "Failed proc initialize impl";
return -1;
......@@ -178,10 +180,10 @@ public:
}
private:
int parse_version_info(const comcfg::ConfigUnit& config, bool version) {
int parse_version_info(const configure::EngineDesc& config, bool version) {
try {
std::string version_file = config["VersionFile"].to_cstr();
std::string version_type = config["VersionType"].to_cstr();
std::string version_file = config.version_file();
std::string version_type = config.version_type();
if (version_type == "abacus_version") {
if (parse_abacus_version(version_file) != 0) {
......@@ -387,7 +389,7 @@ class DBReloadableInferEngine : public ReloadableInferEngine {
public:
virtual ~DBReloadableInferEngine() {}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
int proc_initialize(const configure::EngineDesc& conf, bool version) {
THREAD_KEY_CREATE(&_skey, NULL);
THREAD_MUTEX_INIT(&_mutex, NULL);
return ReloadableInferEngine::proc_initialize(conf, version);
......@@ -486,7 +488,7 @@ class CloneDBReloadableInferEngine : public DBReloadableInferEngine<EngineCore>
public:
virtual ~CloneDBReloadableInferEngine() {}
virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
_pd = new (std::nothrow) ModelData<EngineCore>;
if (!_pd) {
LOG(FATAL) << "Failed to allocate for ProcData";
......@@ -754,30 +756,30 @@ public:
}
~VersionedInferEngine() {}
int proc_initialize(const comcfg::ConfigUnit& conf) {
size_t version_num = conf["Version"].size();
int proc_initialize(const configure::VersionedEngine& conf) {
size_t version_num = conf.versions_size();
for (size_t vi = 0; vi < version_num; ++vi) {
if (proc_initialize(conf["Version"][vi], true) != 0) {
if (proc_initialize(conf.versions(vi), true) != 0) {
LOG(FATAL) << "Failed proc initialize version: "
<< vi << ", model: " << conf["Name"].to_cstr();
<< vi << ", model: " << conf.name().c_str();
return -1;
}
}
if (version_num == 0) {
if (proc_initialize(conf, false) != 0) {
if (proc_initialize(conf.default_version(), false) != 0) {
LOG(FATAL) << "Failed proc intialize engine: "
<< conf["Name"].to_cstr();
<< conf.name().c_str();
return -1;
}
}
LOG(WARNING)
<< "Succ proc initialize engine: " << conf["Name"].to_cstr();
<< "Succ proc initialize engine: " << conf.name().c_str();
return 0;
}
int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
std::string engine_type = conf["Type"].to_cstr();
int proc_initialize(const configure::EngineDesc& conf, bool version) {
std::string engine_type = conf.type();
InferEngine* engine
= StaticInferFactory::instance().generate_object(
engine_type);
......@@ -938,7 +940,7 @@ public:
}
// --
int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool) { return -1; }
int proc_initialize_impl(const configure::EngineDesc& conf, bool) { return -1; }
int thrd_initialize_impl() { return -1; }
int thrd_finalize_impl() { return -1; }
int thrd_clear_impl() { return -1; }
......@@ -958,23 +960,23 @@ public:
}
int proc_initialize(const char* path, const char* file) {
comcfg::Configure conf;
if (conf.load(path, file) != 0) {
LOG(FATAL) << "failed load infer config, path:"
ModelToolkitConf model_toolkit_conf;
if (configure::read_proto_conf(path, file, &model_toolkit_conf) != 0) {
LOG(FATAL) << "failed load infer config, path: "
<< path << "/" << file;
return -1;
}
size_t engine_num = conf["Engine"].size();
size_t engine_num = model_toolkit_conf.engines_size();
for (size_t ei = 0; ei < engine_num; ++ei) {
std::string engine_name = conf["Engine"][ei]["Name"].to_cstr();
std::string engine_name = model_toolkit_conf.engines(ei).name();
VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine();
if (!engine) {
LOG(FATAL) << "Failed generate versioned engine: " << engine_name;
return -1;
}
if (engine->proc_initialize(conf["Engine"][ei]) != 0) {
if (engine->proc_initialize(model_toolkit_conf.engines(ei)) != 0) {
LOG(FATAL) << "Failed initialize version engine, name:"
<< engine_name;
return -1;
......
......@@ -10,6 +10,8 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
using configure::WorkflowConf;
class Workflow;
//class InferService;
//class ParallelInferService;
......@@ -37,11 +39,9 @@ public:
}
int initialize(const std::string path, const std::string file) {
comcfg::Configure conf;
if (conf.load(path.c_str(), file.c_str()) != 0) {
LOG(FATAL)
<< "Failed load manager<" << typeid(T).name()
<< "> configure!";
WorkflowConf workflow_conf;
if (configure::read_proto_conf(path, file, &workflow_conf) != 0) {
LOG(FATAL) << "Failed load manager<" << typeid<T>.name() << "> configure!";
return -1;
}
......
......@@ -6,6 +6,8 @@ namespace baidu {
namespace paddle_serving {
namespace predictor {
using configure::ResourceConf;
// __thread bool p_thread_initialized = false;
static void dynamic_resource_deleter(void* d) {
......@@ -28,8 +30,8 @@ int DynamicResource::clear() {
}
int Resource::initialize(const std::string& path, const std::string& file) {
comcfg::Configure conf;
if (conf.load(path.c_str(), file.c_str()) != 0) {
ResourceConf resource_conf;
if (configure::read_proto_conf(path, file, &resource_conf) != 0) {
LOG(ERROR) << "Failed initialize resource from: "
<< path << "/" << file;
return -1;
......@@ -44,13 +46,13 @@ int Resource::initialize(const std::string& path, const std::string& file) {
if (FLAGS_enable_model_toolkit) {
int err = 0;
std::string model_toolkit_path = conf["model_toolkit_path"].to_cstr(&err);
std::string model_toolkit_path = resource_conf.model_toolkit_path();
if (err != 0) {
LOG(ERROR) << "read model_toolkit_path failed, path["
<< path << "], file[" << file << "]";
return -1;
}
std::string model_toolkit_file = conf["model_toolkit_file"].to_cstr(&err);
std::string model_toolkit_file = resource_conf.model_toolkit_file();
if (err != 0) {
LOG(ERROR) << "read model_toolkit_file failed, path["
<< path << "], file[" << file << "]";
......
......@@ -24,11 +24,6 @@ bool ServerManager::_compare_string_piece_without_case(
ServerManager::ServerManager() {
_format_services.clear();
_options.idle_timeout_sec = FLAGS_idle_timeout_s;
if (FLAGS_enable_nshead_protocol) {
LOG(INFO) << "FLAGS_enable_nshead_protocol on, try to set FLAGS_nshead_protocol["
<< FLAGS_nshead_protocol << "] in server side";
_set_server_option_by_protocol(FLAGS_nshead_protocol);
}
_options.max_concurrency = FLAGS_max_concurrency;
_options.num_threads = FLAGS_num_threads;
}
......
......@@ -14,7 +14,7 @@ public:
typedef OpChannel<google::protobuf::Message> BuiltinChannel;
static const char* tag() {
return "Service";
return "service";
}
InferService() :
......
......@@ -17,7 +17,7 @@ public:
Workflow() {}
static const char* tag() {
return "Workflow";
return "workflow";
}
// Each workflow object corresponds to an independent
......
# Build the proto_configure library: protobuf-generated sources for
# configure.proto plus the text-format parser, and a standalone
# round-trip test binary (test_configure).
list(APPEND protofiles
    ${CMAKE_CURRENT_LIST_DIR}/proto/configure.proto
)

PROTOBUF_GENERATE_CPP(configure_proto_srcs configure_proto_hdrs ${protofiles})

list(APPEND proto_configure_srcs ${configure_proto_srcs})
list(APPEND proto_configure_srcs ${CMAKE_CURRENT_LIST_DIR}/src/configure_parser.cpp)

add_library(proto_configure ${proto_configure_srcs})

add_executable(test_configure
    ${CMAKE_CURRENT_LIST_DIR}/tests/test_configure.cpp)
# Generated .pb.h files land in the binary dir; the parser header lives in include/.
target_include_directories(test_configure PUBLIC
    ${CMAKE_CURRENT_BINARY_DIR}/
    ${CMAKE_CURRENT_LIST_DIR}/include
)
target_link_libraries(test_configure proto_configure protobuf)
#pragma once

#include <string>
#include <google/protobuf/message.h>

namespace baidu {
namespace paddle_serving {
namespace configure {

// Parse the text-format (prototxt) file located by conf_path + conf_file
// into *conf. Returns 0 on success, -1 if the file cannot be opened or
// does not parse as the message type of *conf.
int read_proto_conf(const std::string &conf_path,
                    const std::string &conf_file,
                    google::protobuf::Message *conf);

// Serialize *message in protobuf text format to the file located by
// output_path + output_file. Returns 0 on success, -1 on failure.
int write_proto_conf(google::protobuf::Message *message,
                     const std::string &output_path,
                     const std::string &output_file);

}  // configure
}  // paddle_serving
}  // baidu
// Text-format (prototxt) configuration schema for paddle-serving.
syntax="proto2";
package baidu.paddle_serving.configure;
// Runtime settings for one engine instance: implementation type, model
// data location, reload policy and batching behavior.
message EngineDesc {
required string type = 1;
required string reloadable_meta = 2;
required string reloadable_type = 3;
required string model_data_path = 4;
required uint32 runtime_thread_num = 5;
required uint32 batch_infer_size = 6;
required uint32 enable_batch_align = 7;
// Optional model-version tracking inputs (consumed by the predictor's
// parse_version_info).
optional string version_file = 8;
optional string version_type = 9;
};
// A named engine: either explicit versioned branches (versions) or a
// single default_version when no branches are configured.
message VersionedEngine {
required string name = 1;
repeated EngineDesc versions = 2;
optional EngineDesc default_version = 3;
};
// model_toolkit conf
message ModelToolkitConf {
repeated VersionedEngine engines = 1;
};
// resource conf
message ResourceConf {
required string model_toolkit_path = 1;
required string model_toolkit_file = 2;
};
// DAG node dependency info
message DAGNodeDependency {
required string name = 1;
// Access mode of the dependency (e.g. "RO").
required string mode = 2;
};
// DAG Node
message DAGNode {
required string name = 1;
required string type = 2;
repeated DAGNodeDependency dependencies = 3;
};
// workflow entry
message Workflow {
required string name = 1;
// Execution strategy of the DAG, e.g. "Sequence".
required string workflow_type = 2;
repeated DAGNode nodes = 3;
};
// Workflow conf
message WorkflowConf {
repeated Workflow workflow = 1;
}
// An infer service endpoint and the workflows it executes.
message InferService {
required string name = 1;
repeated string workflow = 2;
};
// InferService conf
message InferServiceConf {
repeated InferService service = 1;
};
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <fstream>
#include "butil/logging.h"
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
namespace baidu {
namespace paddle_serving {
namespace configure {
// Parse the text-format (prototxt) file <conf_path>/<conf_file> into *conf.
// Returns 0 on success, -1 if the file cannot be opened or parsed.
int read_proto_conf(const std::string &conf_path,
                    const std::string &conf_file,
                    google::protobuf::Message *conf)
{
    // BUGFIX: insert a path separator unless the caller already supplied
    // one — callers pass both "./conf" and "./conf/" style paths, and the
    // plain concatenation produced names like "./confworkflow.prototxt".
    std::string file_str = conf_path;
    if (!file_str.empty() && file_str[file_str.size() - 1] != '/') {
        file_str.push_back('/');
    }
    file_str += conf_file;

    int fd = open(file_str.c_str(), O_RDONLY);
    if (fd == -1) {
        LOG(WARNING) << "File not found: " << file_str.c_str();
        return -1;
    }

    google::protobuf::io::FileInputStream input(fd);
    bool success = google::protobuf::TextFormat::Parse(&input, conf);
    close(fd);
    return success ? 0 : -1;
}
// Serialize *message in protobuf text format to <output_path>/<output_file>.
// Returns 0 on success, -1 on serialization or I/O failure.
int write_proto_conf(google::protobuf::Message *message,
                     const std::string &output_path,
                     const std::string &output_file)
{
    std::string text_str;  // text format, not binary — name accordingly
    if (!google::protobuf::TextFormat::PrintToString(*message, &text_str)) {
        LOG(WARNING) << "Failed to serialize message to text format";
        return -1;
    }

    // Insert a path separator unless the caller already supplied one,
    // mirroring read_proto_conf().
    std::string file_str = output_path;
    if (!file_str.empty() && file_str[file_str.size() - 1] != '/') {
        file_str.push_back('/');
    }
    file_str += output_file;

    std::ofstream fout_bin(file_str.c_str());
    if (!fout_bin) {
        LOG(WARNING) << "Open file error: " << file_str.c_str();
        return -1;
    }
    fout_bin.write(text_str.data(),
                   static_cast<std::streamsize>(text_str.size()));
    fout_bin.close();
    // Report write failures instead of silently returning success.
    if (!fout_bin) {
        LOG(WARNING) << "Write file error: " << file_str.c_str();
        return -1;
    }
    return 0;
}
} // configure
} // paddle_serving
} // baidu
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <iostream>
#include "configure.pb.h"
#include "configure_parser.h"
// Pull the generated configure message types into scope for brevity.
using baidu::paddle_serving::configure::EngineDesc;
using baidu::paddle_serving::configure::VersionedEngine;
using baidu::paddle_serving::configure::ModelToolkitConf;
using baidu::paddle_serving::configure::ResourceConf;
using baidu::paddle_serving::configure::DAGNodeDependency;
using baidu::paddle_serving::configure::DAGNode;
using baidu::paddle_serving::configure::Workflow;
using baidu::paddle_serving::configure::WorkflowConf;
using baidu::paddle_serving::configure::InferService;
using baidu::paddle_serving::configure::InferServiceConf;
// Directory all test configs are written to / read back from (created by
// main() if missing), plus the file name used for each config type.
const std::string output_dir = "./conf/";
const std::string model_toolkit_conf_file = "model_toolkit.prototxt";
const std::string resource_conf_file = "resource.prototxt";
const std::string workflow_conf_file = "workflow.prototxt";
const std::string service_conf_file = "service.prototxt";
// Build each config message in memory and dump it as prototxt under
// output_dir. Returns 0 on success, the first failing write's status
// otherwise.
int test_write_conf()
{
    // ---- model_toolkit conf ----
    ModelToolkitConf model_toolkit_conf;

    // This engine has a default version
    VersionedEngine *engine = model_toolkit_conf.add_engines();
    engine->set_name("image_classification_resnet");
    EngineDesc *engine_desc = engine->mutable_default_version();
    engine_desc->set_type("FLUID_CPU_NATIVE_V2");
    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file");
    engine_desc->set_reloadable_type("timestamp_ne");
    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d");
    engine_desc->set_runtime_thread_num(0);
    engine_desc->set_batch_infer_size(0);
    engine_desc->set_enable_batch_align(0);

    // This engine has two versioned branches
    engine = model_toolkit_conf.add_engines();
    engine->set_name("image_classification_resnet_versioned");
    // Version 1
    engine_desc = engine->add_versions();
    engine_desc->set_type("FLUID_CPU_NATIVE_DIR");
    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file");
    engine_desc->set_reloadable_type("timestamp_ne");
    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d");
    engine_desc->set_runtime_thread_num(0);
    engine_desc->set_batch_infer_size(0);
    engine_desc->set_enable_batch_align(0);
    // Version 2
    engine_desc = engine->add_versions();
    engine_desc->set_type("FLUID_CPU_NATIVE_DIR");
    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file_2");
    engine_desc->set_reloadable_type("timestamp_ne_2");
    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d_2");
    engine_desc->set_runtime_thread_num(0);
    engine_desc->set_batch_infer_size(0);
    engine_desc->set_enable_batch_align(0);

    int ret = baidu::paddle_serving::configure::write_proto_conf(
            &model_toolkit_conf, output_dir, model_toolkit_conf_file);
    if (ret != 0) {
        return ret;
    }

    // ---- resource conf ----
    ResourceConf resource_conf;
    resource_conf.set_model_toolkit_path(output_dir);
    // BUGFIX: previously set to "resource.prototxt" (the resource conf
    // itself); it must reference the model toolkit config file.
    resource_conf.set_model_toolkit_file(model_toolkit_conf_file);
    ret = baidu::paddle_serving::configure::write_proto_conf(
            &resource_conf, output_dir, resource_conf_file);
    if (ret != 0) {
        return ret;
    }

    // ---- workflow entries conf ----
    WorkflowConf workflow_conf;
    Workflow *workflow = workflow_conf.add_workflow();
    workflow->set_name("workflow1");
    workflow->set_workflow_type("Sequence");

    DAGNode *dag_node = workflow->add_nodes();
    dag_node->set_name("image_reader_op");
    dag_node->set_type("ReaderOp");

    dag_node = workflow->add_nodes();
    // BUGFIX: was the typo "imag_classify_op", which the write_json_op
    // dependency ("image_classify_op") could never resolve.
    dag_node->set_name("image_classify_op");
    dag_node->set_type("ClassifyOp");
    DAGNodeDependency *node_dependency = dag_node->add_dependencies();
    node_dependency->set_name("image_reader_op");
    node_dependency->set_mode("RO");

    dag_node = workflow->add_nodes();
    dag_node->set_name("write_json_op");
    dag_node->set_type("WriteOp");
    node_dependency = dag_node->add_dependencies();
    node_dependency->set_name("image_classify_op");
    node_dependency->set_mode("RO");

    workflow = workflow_conf.add_workflow();
    workflow->set_name("workflow2");
    workflow->set_workflow_type("Sequence");
    dag_node = workflow->add_nodes();
    dag_node->set_name("dense_op");
    dag_node->set_type("DenseOp");

    ret = baidu::paddle_serving::configure::write_proto_conf(
            &workflow_conf, output_dir, workflow_conf_file);
    if (ret != 0) {
        return ret;
    }

    // ---- infer service conf ----
    InferServiceConf infer_service_conf;
    InferService *infer_service = infer_service_conf.add_service();
    infer_service->set_name("ImageClassifyService");
    infer_service->add_workflow("workflow1");
    infer_service->add_workflow("workflow2");

    infer_service = infer_service_conf.add_service();
    infer_service->set_name("BuiltinDenseFormatService");
    infer_service->add_workflow("workflow2");

    ret = baidu::paddle_serving::configure::write_proto_conf(
            &infer_service_conf, output_dir, service_conf_file);
    if (ret != 0) {
        return ret;
    }

    return 0;
}
int test_read_conf()
{
int ret = 0;
ModelToolkitConf model_toolkit_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, model_toolkit_conf_file, &model_toolkit_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << model_toolkit_conf_file << std::endl;
return -1;
}
ResourceConf resource_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, resource_conf_file, &resource_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << resource_conf_file << std::endl;
return -1;
}
WorkflowConf workflow_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, workflow_conf_file, &workflow_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << workflow_conf_file << std::endl;
return -1;
}
InferServiceConf service_conf;
ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, service_conf_file, &service_conf);
if (ret != 0) {
std::cout << "Read conf fail: " << service_conf_file << std::endl;
return -1;
}
return 0;
}
// Entry point: ensure the conf output directory exists, then run the
// write-then-read round-trip tests. Returns 0 on success, -1 on any
// failure.
int main()
{
    struct stat stat_buf;
    if (stat(output_dir.c_str(), &stat_buf) != 0) {
        // BUGFIX: the inner `int ret = mkdir(...)` shadowed the outer ret;
        // also use output_dir instead of a duplicated "./conf" literal.
        if (mkdir(output_dir.c_str(), 0777) != 0) {
            std::cout << "mkdir ./conf fail" << std::endl;
            return -1;
        }
        if (stat(output_dir.c_str(), &stat_buf) != 0) {
            std::cout << "./conf not exist and creating it failed" << std::endl;
            return -1;
        }
    }

    if (test_write_conf() != 0) {
        std::cout << "test_write_conf fail" << std::endl;
        return -1;
    }
    std::cout << "test_write_conf success" << std::endl;

    if (test_read_conf() != 0) {
        std::cout << "test_read_conf fail" << std::endl;
        return -1;
    }
    std::cout << "test_read_conf success" << std::endl;

    return 0;
}
/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册