From d457e5a531597e0eba75b43234c77f353b128ac7 Mon Sep 17 00:00:00 2001
From: wangguibao
Date: Fri, 22 Feb 2019 20:18:20 +0800
Subject: [PATCH] 20190222

Change-Id: Iadcfd4a3c92f2b91dd6fbcb5fa81f01f605c9ec2
---
 CMakeLists.txt                             |   1 +
 predictor/CMakeLists.txt                   |  24 ++-
 predictor/common/constant.cpp              |  14 +-
 predictor/common/inner_common.h            |   4 +-
 predictor/framework/infer.h                |  64 +++----
 predictor/framework/manager.h              |  10 +-
 predictor/framework/resource.cpp           |  12 +-
 predictor/framework/server.cpp             |   5 -
 predictor/framework/service.h              |   2 +-
 predictor/framework/workflow.h             |   2 +-
 proto_configure/CMakeLists.txt             |  18 ++
 proto_configure/include/configure_parser.h |  17 ++
 proto_configure/proto/configure.proto      |  66 +++++++
 proto_configure/src/configure_parser.cpp   |  58 ++++++
 proto_configure/tests/test_configure.cpp   | 204 +++++++++++++++++++++
 15 files changed, 435 insertions(+), 66 deletions(-)
 create mode 100644 proto_configure/CMakeLists.txt
 create mode 100644 proto_configure/include/configure_parser.h
 create mode 100644 proto_configure/proto/configure.proto
 create mode 100644 proto_configure/src/configure_parser.cpp
 create mode 100644 proto_configure/tests/test_configure.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a1600bdc..912eed05 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -101,6 +101,7 @@ add_subdirectory(bsl)
 add_subdirectory(ullib)
 add_subdirectory(spreg)
 add_subdirectory(configure)
+add_subdirectory(proto_configure)
 add_subdirectory(mempool)
 add_subdirectory(predictor)
 add_subdirectory(inferencer-fluid-cpu)
diff --git a/predictor/CMakeLists.txt b/predictor/CMakeLists.txt
index 25192322..d26aa666 100644
--- a/predictor/CMakeLists.txt
+++ b/predictor/CMakeLists.txt
@@ -17,49 +17,59 @@ set_source_files_properties(
     PROPERTIES
     COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
 add_dependencies(pdserving protobuf boost brpc leveldb bsl pdcodegen configure
+    proto_configure
     ullib spreg mempool)
 target_include_directories(pdserving PUBLIC
     ${CMAKE_CURRENT_LIST_DIR}/
     ${CMAKE_CURRENT_BINARY_DIR}/
+    ${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
     ${CMAKE_CURRENT_LIST_DIR}/../configure
+    ${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
     ${CMAKE_CURRENT_LIST_DIR}/../mempool
     ${CMAKE_CURRENT_LIST_DIR}/../spreg
     ${CMAKE_CURRENT_LIST_DIR}/../ullib/include
     ${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
-target_link_libraries(pdserving brpc protobuf boost leveldb bsl
-    configure ullib spreg mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+target_link_libraries(pdserving
+    brpc protobuf boost leveldb bsl configure proto_configure ullib spreg
+    mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
 add_executable(pdserving_exe ${pdserving_srcs})
 set_source_files_properties(
     ${pdserving_srcs}
     PROPERTIES
     COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
-add_dependencies(pdserving_exe protobuf boost brpc leveldb bsl pdcodegen
-    configure ullib spreg mempool)
+add_dependencies(pdserving_exe
+    protobuf boost brpc leveldb bsl pdcodegen configure proto_configure
+    ullib spreg mempool)
 target_include_directories(pdserving_exe PUBLIC
     ${CMAKE_CURRENT_LIST_DIR}/
     ${CMAKE_CURRENT_BINARY_DIR}/
+    ${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
     ${CMAKE_CURRENT_LIST_DIR}/../configure
+    ${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
     ${CMAKE_CURRENT_LIST_DIR}/../mempool
     ${CMAKE_CURRENT_LIST_DIR}/../spreg
     ${CMAKE_CURRENT_LIST_DIR}/../ullib/include
     ${CMAKE_CURRENT_BINARY_DIR}/../bsl/include)
-target_link_libraries(pdserving_exe brpc protobuf leveldb bsl configure ullib
-    spreg mempool -lpthread -lcrypto -lm -lrt -lssl -ldl -lz)
+target_link_libraries(pdserving_exe brpc protobuf leveldb bsl configure
+    proto_configure ullib spreg mempool -lpthread -lcrypto -lm -lrt -lssl
+    -ldl -lz)
 add_library(pdclient ${pdclient_srcs})
 set_source_files_properties(
     ${pdclient_srcs}
     PROPERTIES
     COMPILE_FLAGS "-Wno-strict-aliasing -Wno-unused-variable -Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
-add_dependencies(pdclient protobuf boost brpc pdcodegen)
+add_dependencies(pdclient protobuf boost brpc pdcodegen proto_configure)
 target_include_directories(pdclient PUBLIC
     ${CMAKE_CURRENT_LIST_DIR}/
     ${CMAKE_CURRENT_BINARY_DIR}/
+    ${CMAKE_CURRENT_BINARY_DIR}/../proto_configure
     ${CMAKE_CURRENT_LIST_DIR}/../configure
+    ${CMAKE_CURRENT_LIST_DIR}/../proto_configure/include
     ${CMAKE_CURRENT_LIST_DIR}/../mempool
     ${CMAKE_CURRENT_LIST_DIR}/../spreg
     ${CMAKE_CURRENT_LIST_DIR}/../ullib/include
diff --git a/predictor/common/constant.cpp b/predictor/common/constant.cpp
index e7f41ca8..66132e8e 100644
--- a/predictor/common/constant.cpp
+++ b/predictor/common/constant.cpp
@@ -9,24 +9,18 @@ DEFINE_int32(el_log_level, 16, "");
 DEFINE_int32(idle_timeout_s, 16, "");
 DEFINE_int32(port, 8010, "");
 DEFINE_string(workflow_path, "./conf", "");
-DEFINE_string(workflow_file, "workflow.conf", "");
+DEFINE_string(workflow_file, "workflow.prototxt", "");
 DEFINE_string(inferservice_path, "./conf", "");
-DEFINE_string(inferservice_file, "service.conf", "");
+DEFINE_string(inferservice_file, "service.prototxt", "");
 DEFINE_string(logger_path, "./conf", "");
 DEFINE_string(logger_file, "log.conf", "");
 DEFINE_string(resource_path, "./conf", "");
-DEFINE_string(resource_file, "resource.conf", "");
-DEFINE_bool(enable_yacl, false, "enable yacl");
-DEFINE_string(yacl_module_name, "predictor", "yacl module name");
-DEFINE_string(yacl_param_dump_file, "./data/yacl_param_list.txt", "yacl param dump file path");
-DEFINE_bool(enable_mc_cache, false, "enable mc cache");
-DEFINE_bool(enable_nshead_protocol, false, "enable nshead protocol in server side");
-DEFINE_string(nshead_protocol, "itp", "type of nshead protocol, support itp, nova_pbrpc, public_pbrpc, nshead_mcpack");
+DEFINE_string(resource_file, "resource.prototxt", "");
 DEFINE_int32(max_concurrency, 0, "Limit of request processing in parallel, 0: unlimited");
 DEFINE_int32(num_threads, 0, "Number of pthreads that server runs on, not change if this value <= 0");
 DEFINE_int32(reload_interval_s, 10, "");
 DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
-DEFINE_string(enable_protocol_list, "baidu_std nshead", "set protocol list");
+DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list");
 const char* START_OP_NAME = "startup_op";
 } // predictor
diff --git a/predictor/common/inner_common.h b/predictor/common/inner_common.h
index 2010063d..43ef30f0 100644
--- a/predictor/common/inner_common.h
+++ b/predictor/common/inner_common.h
@@ -26,7 +26,9 @@
 #include
 #include "Configure.h"
-// #include
+
+#include "configure.pb.h"
+#include "configure_parser.h"
 #include "common/utils.h"
 #include "common/types.h"
diff --git a/predictor/framework/infer.h b/predictor/framework/infer.h
index 7943ef10..5d41c144 100644
--- a/predictor/framework/infer.h
+++ b/predictor/framework/infer.h
@@ -13,12 +13,14 @@ namespace baidu {
 namespace paddle_serving {
 namespace predictor {
+using configure::ModelToolkitConf;
+
 class InferEngine {
 public:
     virtual ~InferEngine() {}
-    virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
+    virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
         return proc_initialize_impl(conf, version);
     }
     virtual int proc_finalize() {
@@ -43,7 +45,7 @@ public:
     // begin: framework inner call
     virtual int proc_initialize_impl(
-            const comcfg::ConfigUnit& conf, bool version) = 0;
+            const configure::EngineDesc& conf, bool version) = 0;
     virtual int thrd_initialize_impl() = 0;
     virtual int thrd_finalize_impl() = 0;
     virtual int thrd_clear_impl() = 0;
@@ -68,13 +70,13 @@ public:
     virtual int load(const std::string& data_path) = 0;
-    int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool version) {
-        _reload_tag_file = conf["ReloadableMeta"].to_cstr();
-        _reload_mode_tag = conf["ReloadableType"].to_cstr();
-        _model_data_path = conf["ModelDataPath"].to_cstr();
-        _infer_thread_num = conf["RuntimeThreadNum"].to_uint32();
-        _infer_batch_size = conf["BatchInferSize"].to_uint32();
-        _infer_batch_align = conf["EnableBatchAlign"].to_uint32();
+    int proc_initialize_impl(const configure::EngineDesc& conf, bool version) {
+        _reload_tag_file = conf.reloadable_meta();
+        _reload_mode_tag = conf.reloadable_type();
+        _model_data_path = conf.model_data_path();
+        _infer_thread_num = conf.runtime_thread_num();
+        _infer_batch_size = conf.batch_infer_size();
+        _infer_batch_align = conf.enable_batch_align();
         if (!check_need_reload() || load(_model_data_path) != 0) {
             LOG(FATAL) << "Failed load model_data_path" << _model_data_path;
             return -1;
         }
@@ -89,7 +91,7 @@ public:
         return 0;
     }
-    int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
+    int proc_initialize(const configure::EngineDesc& conf, bool version) {
         if (proc_initialize_impl(conf, version) != 0) {
             LOG(FATAL) << "Failed proc initialize impl";
             return -1;
         }
@@ -178,10 +180,10 @@ public:
     }
 private:
-    int parse_version_info(const comcfg::ConfigUnit& config, bool version) {
+    int parse_version_info(const configure::EngineDesc& config, bool version) {
         try {
-            std::string version_file = config["VersionFile"].to_cstr();
-            std::string version_type = config["VersionType"].to_cstr();
+            std::string version_file = config.version_file();
+            std::string version_type = config.version_type();
             if (version_type == "abacus_version") {
                 if (parse_abacus_version(version_file) != 0) {
@@ -387,7 +389,7 @@ class DBReloadableInferEngine : public ReloadableInferEngine {
 public:
     virtual ~DBReloadableInferEngine() {}
-    int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
+    int proc_initialize(const configure::EngineDesc& conf, bool version) {
         THREAD_KEY_CREATE(&_skey, NULL);
         THREAD_MUTEX_INIT(&_mutex, NULL);
         return ReloadableInferEngine::proc_initialize(conf, version);
@@ -486,7 +488,7 @@ class CloneDBReloadableInferEngine : public DBReloadableInferEngine
 public:
     virtual ~CloneDBReloadableInferEngine() {}
-    virtual int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
+    virtual int proc_initialize(const configure::EngineDesc& conf, bool version) {
         _pd = new (std::nothrow) ModelData;
         if (!_pd) {
             LOG(FATAL) << "Failed to allocate for ProcData";
@@ -754,30 +756,30 @@ public:
     }
     ~VersionedInferEngine() {}
-    int proc_initialize(const comcfg::ConfigUnit& conf) {
-        size_t version_num = conf["Version"].size();
+    int proc_initialize(const configure::VersionedEngine& conf) {
+        size_t version_num = conf.versions_size();
         for (size_t vi = 0; vi < version_num; ++vi) {
-            if (proc_initialize(conf["Version"][vi], true) != 0) {
+            if (proc_initialize(conf.versions(vi), true) != 0) {
                 LOG(FATAL) << "Failed proc initialize version: "
-                    << vi << ", model: " << conf["Name"].to_cstr();
+                    << vi << ", model: " << conf.name().c_str();
                 return -1;
             }
         }
         if (version_num == 0) {
-            if (proc_initialize(conf, false) != 0) {
+            if (proc_initialize(conf.default_version(), false) != 0) {
                 LOG(FATAL) << "Failed proc intialize engine: "
-                    << conf["Name"].to_cstr();
+                    << conf.name().c_str();
                 return -1;
             }
         }
         LOG(WARNING)
-            << "Succ proc initialize engine: " << conf["Name"].to_cstr();
+            << "Succ proc initialize engine: " << conf.name().c_str();
         return 0;
     }
-    int proc_initialize(const comcfg::ConfigUnit& conf, bool version) {
-        std::string engine_type = conf["Type"].to_cstr();
+    int proc_initialize(const configure::EngineDesc& conf, bool version) {
+        std::string engine_type = conf.type();
         InferEngine* engine = StaticInferFactory::instance().generate_object(
                 engine_type);
@@ -938,7 +940,7 @@ public:
     }
     // --
-    int proc_initialize_impl(const comcfg::ConfigUnit& conf, bool) { return -1; }
+    int proc_initialize_impl(const configure::EngineDesc& conf, bool) { return -1; }
     int thrd_initialize_impl() { return -1; }
     int thrd_finalize_impl() { return -1; }
     int thrd_clear_impl() { return -1; }
@@ -958,23 +960,23 @@ public:
     }
     int proc_initialize(const char* path, const char* file) {
-        comcfg::Configure conf;
-        if (conf.load(path, file) != 0) {
-            LOG(FATAL) << "failed load infer config, path:"
+        ModelToolkitConf model_toolkit_conf;
+        if (configure::read_proto_conf(path, file, &model_toolkit_conf) != 0) {
+            LOG(FATAL) << "failed load infer config, path: "
                 << path << "/" << file;
             return -1;
        }
-        size_t engine_num = conf["Engine"].size();
+        size_t engine_num = model_toolkit_conf.engines_size();
         for (size_t ei = 0; ei < engine_num; ++ei) {
-            std::string engine_name = conf["Engine"][ei]["Name"].to_cstr();
+            std::string engine_name = model_toolkit_conf.engines(ei).name();
             VersionedInferEngine* engine = new (std::nothrow) VersionedInferEngine();
             if (!engine) {
                 LOG(FATAL) << "Failed generate versioned engine: " << engine_name;
                 return -1;
             }
-            if (engine->proc_initialize(conf["Engine"][ei]) != 0) {
+            if (engine->proc_initialize(model_toolkit_conf.engines(ei)) != 0) {
                 LOG(FATAL) << "Failed initialize version engine, name:"
                     << engine_name;
                 return -1;
diff --git a/predictor/framework/manager.h b/predictor/framework/manager.h
index 43dfe0fd..d7fa50f6 100644
--- a/predictor/framework/manager.h
+++ b/predictor/framework/manager.h
@@ -10,6 +10,8 @@ namespace baidu {
 namespace paddle_serving {
 namespace predictor {
+using configure::WorkflowConf;
+
 class Workflow;
 //class InferService;
 //class ParallelInferService;
@@ -37,11 +39,9 @@ public:
     }
     int initialize(const std::string path, const std::string file) {
-        comcfg::Configure conf;
-        if (conf.load(path.c_str(), file.c_str()) != 0) {
-            LOG(FATAL)
-                << "Failed load manager<" << typeid(T).name()
-                << "> configure!";
+        WorkflowConf workflow_conf;
+        if (configure::read_proto_conf(path, file, &workflow_conf) != 0) {
+            LOG(FATAL) << "Failed load manager<" << typeid(T).name() << "> configure!";
             return -1;
         }
diff --git a/predictor/framework/resource.cpp b/predictor/framework/resource.cpp
index f675cedd..99c239a5 100644
--- a/predictor/framework/resource.cpp
+++ b/predictor/framework/resource.cpp
@@ -6,6 +6,8 @@ namespace baidu {
 namespace paddle_serving {
 namespace predictor {
+using configure::ResourceConf;
+
 // __thread bool p_thread_initialized = false;
 static void dynamic_resource_deleter(void* d) {
@@ -28,9 +30,9 @@ int DynamicResource::clear() {
 }
 int Resource::initialize(const std::string& path, const std::string& file) {
-    comcfg::Configure conf;
-    if (conf.load(path.c_str(), file.c_str()) != 0) {
-        LOG(ERROR) << "Failed initialize resource from: "
+    ResourceConf resource_conf;
+    if (configure::read_proto_conf(path, file, &resource_conf) != 0) {
+        LOG(ERROR) << "Failed initialize resource from: "
             << path << "/" << file;
         return -1;
     }
@@ -44,13 +46,13 @@ int Resource::initialize(const std::string& path, const std::string& file) {
     if (FLAGS_enable_model_toolkit) {
         int err = 0;
-        std::string model_toolkit_path = conf["model_toolkit_path"].to_cstr(&err);
+        std::string model_toolkit_path = resource_conf.model_toolkit_path();
         if (err != 0) {
             LOG(ERROR) << "read model_toolkit_path failed, path["
                 << path << "], file[" << file << "]";
             return -1;
         }
-        std::string model_toolkit_file = conf["model_toolkit_file"].to_cstr(&err);
+        std::string model_toolkit_file = resource_conf.model_toolkit_file();
         if (err != 0) {
             LOG(ERROR) << "read model_toolkit_file failed, path["
                 << path << "], file[" << file << "]";
diff --git a/predictor/framework/server.cpp b/predictor/framework/server.cpp
index 68c27bf1..f8d1152b 100644
--- a/predictor/framework/server.cpp
+++ b/predictor/framework/server.cpp
@@ -24,11 +24,6 @@ bool ServerManager::_compare_string_piece_without_case(
 ServerManager::ServerManager() {
     _format_services.clear();
     _options.idle_timeout_sec = FLAGS_idle_timeout_s;
-    if (FLAGS_enable_nshead_protocol) {
-        LOG(INFO) << "FLAGS_enable_nshead_protocol on, try to set FLAGS_nshead_protocol["
-            << FLAGS_nshead_protocol << "] in server side";
-        _set_server_option_by_protocol(FLAGS_nshead_protocol);
-    }
     _options.max_concurrency = FLAGS_max_concurrency;
     _options.num_threads = FLAGS_num_threads;
 }
diff --git a/predictor/framework/service.h b/predictor/framework/service.h
index 1fdeb01b..c685c8d9 100644
--- a/predictor/framework/service.h
+++ b/predictor/framework/service.h
@@ -14,7 +14,7 @@ public:
     typedef OpChannel BuiltinChannel;
     static const char* tag() {
-        return "Service";
+        return "service";
     }
     InferService() :
diff --git a/predictor/framework/workflow.h b/predictor/framework/workflow.h
index d2b4ebb5..9c4f7743 100644
--- a/predictor/framework/workflow.h
+++ b/predictor/framework/workflow.h
@@ -17,7 +17,7 @@ public:
     Workflow() {}
     static const char* tag() {
-        return "Workflow";
+        return "workflow";
     }
     // Each workflow object corresponds to an independent
diff --git a/proto_configure/CMakeLists.txt b/proto_configure/CMakeLists.txt
new file mode 100644
index 00000000..5cd32c4e
--- /dev/null
+++ b/proto_configure/CMakeLists.txt
@@ -0,0 +1,18 @@
+LIST(APPEND protofiles
+    ${CMAKE_CURRENT_LIST_DIR}/proto/configure.proto
+)
+
+PROTOBUF_GENERATE_CPP(configure_proto_srcs configure_proto_hdrs ${protofiles})
+list(APPEND proto_configure_srcs ${configure_proto_srcs})
+
+list(APPEND proto_configure_srcs ${CMAKE_CURRENT_LIST_DIR}/src/configure_parser.cpp)
+
+add_library(proto_configure ${proto_configure_srcs})
+
+add_executable(test_configure
+    ${CMAKE_CURRENT_LIST_DIR}/tests/test_configure.cpp)
+target_include_directories(test_configure PUBLIC
+    ${CMAKE_CURRENT_BINARY_DIR}/
+    ${CMAKE_CURRENT_LIST_DIR}/include
+    )
+target_link_libraries(test_configure proto_configure protobuf)
diff --git a/proto_configure/include/configure_parser.h b/proto_configure/include/configure_parser.h
new file mode 100644
index 00000000..f84871fc
--- /dev/null
+++ b/proto_configure/include/configure_parser.h
@@ -0,0 +1,17 @@
+#pragma once
+#include <google/protobuf/message.h>
+
+namespace baidu {
+namespace paddle_serving {
+namespace configure {
+int read_proto_conf(const std::string &conf_path,
+                    const std::string &conf_file,
+                    google::protobuf::Message *conf);
+
+int write_proto_conf(google::protobuf::Message *message,
+                     const std::string &output_path,
+                     const std::string &output_file);
+
+} // configure
+} // paddle_serving
+} // baidu
diff --git a/proto_configure/proto/configure.proto b/proto_configure/proto/configure.proto
new file mode 100644
index 00000000..7ccf8f54
--- /dev/null
+++ b/proto_configure/proto/configure.proto
@@ -0,0 +1,66 @@
+syntax="proto2";
+package baidu.paddle_serving.configure;
+
+message EngineDesc {
+    required string type = 1;
+    required string reloadable_meta = 2;
+    required string reloadable_type = 3;
+    required string model_data_path = 4;
+    required uint32 runtime_thread_num = 5;
+    required uint32 batch_infer_size = 6;
+    required uint32 enable_batch_align = 7;
+    optional string version_file = 8;
+    optional string version_type = 9;
+};
+
+message VersionedEngine {
+    required string name = 1;
+    repeated EngineDesc versions = 2;
+    optional EngineDesc default_version = 3;
+};
+
+// model_toolkit conf
+message ModelToolkitConf {
+    repeated VersionedEngine engines = 1;
+};
+
+// resource conf
+message ResourceConf {
+    required string model_toolkit_path = 1;
+    required string model_toolkit_file = 2;
+};
+
+// DAG node dependency info
+message DAGNodeDependency {
+    required string name = 1;
+    required string mode = 2;
+};
+
+// DAG Node
+message DAGNode {
+    required string name = 1;
+    required string type = 2;
+    repeated DAGNodeDependency dependencies = 3;
+};
+
+// workflow entry
+message Workflow {
+    required string name = 1;
+    required string workflow_type = 2;
+    repeated DAGNode nodes = 3;
+};
+
+// Workflow conf
+message WorkflowConf {
+    repeated Workflow workflow = 1;
+}
+
+message InferService {
+    required string name = 1;
+    repeated string workflow = 2;
+};
+
+// InferService conf
+message InferServiceConf {
+    repeated InferService service = 1;
+};
diff --git a/proto_configure/src/configure_parser.cpp b/proto_configure/src/configure_parser.cpp
new file mode 100644
index 00000000..ba41caf3
--- /dev/null
+++ b/proto_configure/src/configure_parser.cpp
@@ -0,0 +1,58 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <string>
+#include <fstream>
+#include "butil/logging.h"
+
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+
+namespace baidu {
+namespace paddle_serving {
+namespace configure {
+
+int read_proto_conf(const std::string &conf_path,
+                    const std::string &conf_file,
+                    google::protobuf::Message *conf)
+{
+    std::string file_str = conf_path + conf_file;
+    int fd = open(file_str.c_str(), O_RDONLY);
+    if (fd == -1) {
+        LOG(WARNING) << "File not found: " << file_str.c_str();
+        return -1;
+    }
+
+    google::protobuf::io::FileInputStream input(fd);
+    bool success = google::protobuf::TextFormat::Parse(&input, conf);
+    close(fd);
+    if (!success) {
+        return -1;
+    }
+
+    return 0;
+}
+
+int write_proto_conf(google::protobuf::Message *message,
+                     const std::string &output_path,
+                     const std::string &output_file)
+{
+    std::string binary_str;
+    google::protobuf::TextFormat::PrintToString(*message, &binary_str);
+
+    std::string file_str = output_path + output_file;
+    std::ofstream fout_bin((file_str.c_str()));
+    if (!fout_bin) {
+        LOG(WARNING) << "Open file error: " << file_str.c_str();
+        return -1;
+    }
+
+    fout_bin.write((char *)binary_str.c_str(), binary_str.size());
+    fout_bin.close();
+
+    return 0;
+}
+
+} // configure
+} // paddle_serving
+} // baidu
+/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
diff --git a/proto_configure/tests/test_configure.cpp b/proto_configure/tests/test_configure.cpp
new file mode 100644
index 00000000..8f1cac41
--- /dev/null
+++ b/proto_configure/tests/test_configure.cpp
@@ -0,0 +1,204 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <iostream>
+#include <string>
+#include "configure.pb.h"
+#include "configure_parser.h"
+
+using baidu::paddle_serving::configure::EngineDesc;
+using baidu::paddle_serving::configure::VersionedEngine;
+using baidu::paddle_serving::configure::ModelToolkitConf;
+
+using baidu::paddle_serving::configure::ResourceConf;
+
+using baidu::paddle_serving::configure::DAGNodeDependency;
+using baidu::paddle_serving::configure::DAGNode;
+using baidu::paddle_serving::configure::Workflow;
+using baidu::paddle_serving::configure::WorkflowConf;
+
+using baidu::paddle_serving::configure::InferService;
+using baidu::paddle_serving::configure::InferServiceConf;
+
+const std::string output_dir = "./conf/";
+const std::string model_toolkit_conf_file = "model_toolkit.prototxt";
+const std::string resource_conf_file = "resource.prototxt";
+const std::string workflow_conf_file = "workflow.prototxt";
+const std::string service_conf_file = "service.prototxt";
+
+int test_write_conf()
+{
+    // model_toolkit conf
+    ModelToolkitConf model_toolkit_conf;
+
+    // This engine has a default version
+    VersionedEngine *engine = model_toolkit_conf.add_engines();
+    engine->set_name("image_classification_resnet");
+    EngineDesc *engine_desc = engine->mutable_default_version();
+    engine_desc->set_type("FLUID_CPU_NATIVE_V2");
+    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file");
+    engine_desc->set_reloadable_type("timestamp_ne");
+    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d");
+    engine_desc->set_runtime_thread_num(0);
+    engine_desc->set_batch_infer_size(0);
+    engine_desc->set_enable_batch_align(0);
+
+    // This engine has two versioned branches
+    engine = model_toolkit_conf.add_engines();
+    engine->set_name("image_classification_resnet_versioned");
+    // Version 1
+    engine_desc = engine->add_versions();
+    engine_desc->set_type("FLUID_CPU_NATIVE_DIR");
+    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file");
+    engine_desc->set_reloadable_type("timestamp_ne");
+    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d");
+    engine_desc->set_runtime_thread_num(0);
+    engine_desc->set_batch_infer_size(0);
+    engine_desc->set_enable_batch_align(0);
+    // Version 2
+    engine_desc = engine->add_versions();
+    engine_desc->set_type("FLUID_CPU_NATIVE_DIR");
+    engine_desc->set_reloadable_meta("./data/model/paddle/fluid_time_file_2");
+    engine_desc->set_reloadable_type("timestamp_ne_2");
+    engine_desc->set_model_data_path("./data/model/paddle/fluid/SE_ResNeXt50_32x4d_2");
+    engine_desc->set_runtime_thread_num(0);
+    engine_desc->set_batch_infer_size(0);
+    engine_desc->set_enable_batch_align(0);
+
+    int ret = baidu::paddle_serving::configure::write_proto_conf(&model_toolkit_conf, output_dir, model_toolkit_conf_file);
+    if (ret != 0) {
+        return ret;
+    }
+
+    // resource conf
+    ResourceConf resource_conf;
+    resource_conf.set_model_toolkit_path(output_dir);
+    resource_conf.set_model_toolkit_file("model_toolkit.prototxt");
+    ret = baidu::paddle_serving::configure::write_proto_conf(&resource_conf, output_dir, resource_conf_file);
+    if (ret != 0) {
+        return ret;
+    }
+
+    // workflow entries conf
+    WorkflowConf workflow_conf;
+    Workflow *workflow = workflow_conf.add_workflow();
+    workflow->set_name("workflow1");
+    workflow->set_workflow_type("Sequence");
+
+    DAGNode *dag_node = workflow->add_nodes();
+    dag_node->set_name("image_reader_op");
+    dag_node->set_type("ReaderOp");
+
+    dag_node = workflow->add_nodes();
+    dag_node->set_name("image_classify_op");
+    dag_node->set_type("ClassifyOp");
+    DAGNodeDependency *node_dependency = dag_node->add_dependencies();
+    node_dependency->set_name("image_reader_op");
+    node_dependency->set_mode("RO");
+
+    dag_node = workflow->add_nodes();
+    dag_node->set_name("write_json_op");
+    dag_node->set_type("WriteOp");
+    node_dependency = dag_node->add_dependencies();
+    node_dependency->set_name("image_classify_op");
+    node_dependency->set_mode("RO");
+
+    workflow = workflow_conf.add_workflow();
+    workflow->set_name("workflow2");
+    workflow->set_workflow_type("Sequence");
+
+    dag_node = workflow->add_nodes();
+    dag_node->set_name("dense_op");
+    dag_node->set_type("DenseOp");
+
+    ret = baidu::paddle_serving::configure::write_proto_conf(&workflow_conf, output_dir, workflow_conf_file);
+    if (ret != 0) {
+        return ret;
+    }
+
+    InferServiceConf infer_service_conf;
+    InferService *infer_service = infer_service_conf.add_service();
+    infer_service->set_name("ImageClassifyService");
+    infer_service->add_workflow("workflow1");
+    infer_service->add_workflow("workflow2");
+
+    infer_service = infer_service_conf.add_service();
+    infer_service->set_name("BuiltinDenseFormatService");
+    infer_service->add_workflow("workflow2");
+
+    ret = baidu::paddle_serving::configure::write_proto_conf(&infer_service_conf, output_dir, service_conf_file);
+    if (ret != 0) {
+        return ret;
+    }
+    return 0;
+}
+
+int test_read_conf()
+{
+    int ret = 0;
+
+    ModelToolkitConf model_toolkit_conf;
+    ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, model_toolkit_conf_file, &model_toolkit_conf);
+    if (ret != 0) {
+        std::cout << "Read conf fail: " << model_toolkit_conf_file << std::endl;
+        return -1;
+    }
+
+    ResourceConf resource_conf;
+    ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, resource_conf_file, &resource_conf);
+    if (ret != 0) {
+        std::cout << "Read conf fail: " << resource_conf_file << std::endl;
+        return -1;
+    }
+
+    WorkflowConf workflow_conf;
+    ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, workflow_conf_file, &workflow_conf);
+    if (ret != 0) {
+        std::cout << "Read conf fail: " << workflow_conf_file << std::endl;
+        return -1;
+    }
+
+    InferServiceConf service_conf;
+    ret = baidu::paddle_serving::configure::read_proto_conf(output_dir, service_conf_file, &service_conf);
+    if (ret != 0) {
+        std::cout << "Read conf fail: " << service_conf_file << std::endl;
+        return -1;
+    }
+
+    return 0;
+}
+
+int main()
+{
+    int ret = 0;
+    struct stat stat_buf;
+    if (stat(output_dir.c_str(), &stat_buf) != 0) {
+        int ret = mkdir("./conf", 0777);
+        if (ret != 0) {
+            std::cout << "mkdir ./conf fail" << std::endl;
+            return -1;
+        }
+        if (stat("./conf", &stat_buf) != 0) {
+            std::cout << "./conf not exist and creating it failed" << std::endl;
+            return -1;
+        }
+    }
+
+    ret = test_write_conf();
+    if (ret != 0) {
+        std::cout << "test_write_conf fail" << std::endl;
+        return -1;
+    }
+
+    std::cout << "test_write_conf success" << std::endl;
+
+    ret = test_read_conf();
+    if (ret != 0) {
+        std::cout << "test_read_conf fail" << std::endl;
+        return -1;
+    }
+    std::cout << "test_read_conf success" << std::endl;
+
+    return 0;
+}
+
+/* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */
--
GitLab
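
Note: the *.prototxt files that the new gflags defaults point at (service.prototxt, workflow.prototxt, resource.prototxt, model_toolkit.prototxt) are plain protobuf text-format renderings of the messages defined in proto_configure/proto/configure.proto and are loaded at startup via configure::read_proto_conf(path, file, &conf). As a rough, hand-written sketch only (field names come from configure.proto, values are borrowed from test_configure.cpp above, not from any shipped default), the resource.prototxt and workflow.prototxt produced by test_write_conf() would look approximately like this:

    # resource.prototxt (illustrative)
    model_toolkit_path: "./conf/"
    model_toolkit_file: "model_toolkit.prototxt"

    # workflow.prototxt (illustrative, first workflow only)
    workflow {
        name: "workflow1"
        workflow_type: "Sequence"
        nodes {
            name: "image_reader_op"
            type: "ReaderOp"
        }
        nodes {
            name: "image_classify_op"
            type: "ClassifyOp"
            dependencies {
                name: "image_reader_op"
                mode: "RO"
            }
        }
    }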