diff --git a/predictor/common/constant.cpp b/predictor/common/constant.cpp
index c1cd1c8b557e8a201089cd03455b0ac0cccefdb8..36519868ab5327093af20e174b5877191695da2a 100644
--- a/predictor/common/constant.cpp
+++ b/predictor/common/constant.cpp
@@ -41,6 +41,9 @@
 DEFINE_int32(reload_interval_s, 10, "");
 DEFINE_bool(enable_model_toolkit, false, "enable model toolkit");
 DEFINE_string(enable_protocol_list, "baidu_std", "set protocol list");
 DEFINE_bool(enable_cube, false, "enable cube");
+DEFINE_string(general_model_path, "./conf", "");
+DEFINE_string(general_model_file, "general_model.prototxt", "");
+DEFINE_bool(enable_general_model, false, "enable general model");
 const char* START_OP_NAME = "startup_op";
 }  // namespace predictor
diff --git a/predictor/framework/resource.cpp b/predictor/framework/resource.cpp
index c8346ab9817dcf39a136248f2890390b704f50ec..9b6535e666f9c0927746902948d25b8d3a00f723 100644
--- a/predictor/framework/resource.cpp
+++ b/predictor/framework/resource.cpp
@@ -21,7 +21,7 @@
 namespace paddle_serving {
 namespace predictor {
 using configure::ResourceConf;
-using configure::GeneralModelConf;
+using configure::GeneralModelConfig;
 using configure::Shape;
 using rec::mcube::CubeAPI;
 // __thread bool p_thread_initialized = false;
@@ -104,29 +104,29 @@ int Resource::general_model_initialize(
     return 0;
   }
 
-  GeneralModelConf model_config;
+  GeneralModelConfig model_config;
   if (configure::read_proto_conf(path, file, &model_config) != 0) {
     LOG(ERROR) << "Failed initialize resource from: " << path << "/" << file;
     return -1;
   }
 
   _config.reset(new PaddleGeneralModelConfig());
-  _config->_feed_type.resize(model_config.is_feed_type_size());
+  _config->_feed_type.resize(model_config.feed_type_size());
   _config->_is_lod_feed.resize(model_config.is_lod_feed_size());
   _config->_capacity.resize(model_config.feed_shape_size());
   _config->_feed_shape.resize(model_config.feed_shape_size());
   for (int i = 0; i < model_config.is_lod_feed_size(); ++i) {
-    _config->feed_type[i] = model_config.feed_type(i);
+    _config->_feed_type[i] = model_config.feed_type(i);
     if (model_config.is_lod_feed(i)) {
       _config->_feed_shape[i] = {-1};
       _config->_is_lod_feed[i] = true;
     } else {
-      _config->capacity[i] = 1;
+      _config->_capacity[i] = 1;
       _config->_is_lod_feed[i] = false;
       for (int j = 0; j < model_config.feed_shape(i).shape_size(); ++j) {
-        int dim = model_cnofig.feed_shape(i).shape(j);
+        int dim = model_config.feed_shape(i).shape(j);
         _config->_feed_shape[i].push_back(dim);
-        _config->capacity[i] *= dim;
+        _config->_capacity[i] *= dim;
       }
     }
   }
diff --git a/predictor/framework/resource.h b/predictor/framework/resource.h
index 1384f72ad2ead6f6f6f7c0f62ddc4a94ed0e9d87..600cf2112e6d86d236ae7ca670015fe1ff37f851 100644
--- a/predictor/framework/resource.h
+++ b/predictor/framework/resource.h
@@ -27,11 +27,10 @@
 namespace paddle_serving {
 namespace predictor {
 
 class PaddleGeneralModelConfig {
-  PaddleGeneralModelConfig();
-
-  ~PaddleGeneralModelConfig();
+ public:
+  PaddleGeneralModelConfig() {}
 
-  void load_config(std::string);
+  ~PaddleGeneralModelConfig() {}
 
 public:
diff --git a/predictor/src/pdserving.cpp b/predictor/src/pdserving.cpp
index 243a0df64a89acff3b6af7d0a2ef6d0067c8e568..c8e15fca3674e9838ba148f64b9b467d73a73d9b 100644
--- a/predictor/src/pdserving.cpp
+++ b/predictor/src/pdserving.cpp
@@ -45,6 +45,8 @@
 using baidu::paddle_serving::predictor::FLAGS_logger_path;
 using baidu::paddle_serving::predictor::FLAGS_logger_file;
 using baidu::paddle_serving::predictor::FLAGS_resource_path;
 using baidu::paddle_serving::predictor::FLAGS_resource_file;
+using baidu::paddle_serving::predictor::FLAGS_general_model_path;
+using baidu::paddle_serving::predictor::FLAGS_general_model_file;
 using baidu::paddle_serving::predictor::FLAGS_reload_interval_s;
 using baidu::paddle_serving::predictor::FLAGS_port;
@@ -225,7 +227,7 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  LOG(INFO) << "Succ initialize general model"
+  LOG(INFO) << "Succ initialize general model";
 
   // FATAL messages are output to stderr
   FLAGS_stderrthreshold = 3;