Commit d2906933 authored by MRXLT

optimize general server config

Parent 1dd2066f
@@ -15,17 +15,15 @@
 syntax = "proto2";
 package baidu.paddle_serving.configure;
-message Shape { repeated int32 shape = 1; };
 message FeedVar {
   required string name = 1;
   required bool is_lod_tensor = 2;
   required int32 feed_type = 3;
-  required Shape feed_shape = 4;
+  repeated int32 shape = 4;
 }
 message FetchVar {
   required string name = 1;
-  required Shape fetch_shape = 2;
+  repeated int32 shape = 2;
 }
 message GeneralModelConfig {
   repeated FeedVar feed_var = 1;
......
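The schema change above flattens the one-field `Shape` wrapper into a `repeated int32 shape` field directly on `FeedVar` and `FetchVar`, so the generated C++ accessors lose one level of nesting. A minimal sketch of the before/after, assuming the generated header is named `general_model_config.pb.h` (the file name is not shown in the diff):

```cpp
#include "general_model_config.pb.h"  // assumed name of the generated header

using baidu::paddle_serving::configure::FeedVar;

void fill_feed_var(FeedVar* var) {
  var->set_name("words");
  var->set_is_lod_tensor(true);
  var->set_feed_type(0);
  // Old schema: var->mutable_feed_shape()->add_shape(-1);
  // New schema: the repeated field sits directly on FeedVar.
  var->add_shape(-1);
}
```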
@@ -23,7 +23,6 @@ namespace predictor {
 using configure::ResourceConf;
 using configure::GeneralModelConfig;
-using configure::Shape;
 using rec::mcube::CubeAPI;
 // __thread bool p_thread_initialized = false;
@@ -198,9 +197,8 @@ int Resource::general_model_initialize(const std::string& path,
     } else {
       _config->_capacity[i] = 1;
       _config->_is_lod_feed[i] = false;
-      for (int j = 0; j < model_config.feed_var(i).feed_shape().shape_size();
-           ++j) {
-        int32_t dim = model_config.feed_var(i).feed_shape().shape(j);
+      for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
+        int32_t dim = model_config.feed_var(i).shape(j);
         _config->_feed_shape[i].push_back(dim);
         _config->_capacity[i] *= dim;
       }
@@ -212,9 +210,8 @@ int Resource::general_model_initialize(const std::string& path,
   _config->_fetch_shape.resize(fetch_var_num);
   for (int i = 0; i < fetch_var_num; ++i) {
     _config->_fetch_name[i] = model_config.fetch_var(i).name();
-    for (int j = 0; j < model_config.fetch_var(i).fetch_shape().shape_size();
-         ++j) {
-      int dim = model_config.fetch_var(i).fetch_shape().shape(j);
+    for (int j = 0; j < model_config.fetch_var(i).shape_size(); ++j) {
+      int dim = model_config.fetch_var(i).shape(j);
       _config->_fetch_shape[i].push_back(dim);
     }
   }
......
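In both hunks the per-variable dims are now read straight off the flattened `shape` field, and the feed capacity remains the product of those dims. A standalone sketch of that capacity computation (the helper name is hypothetical; a LoD feed instead keeps `shape: -1` and is handled by the branch the diff does not show):

```cpp
#include <cstdint>
#include <vector>

// Capacity of a non-LoD feed var: the product of its dims, mirroring the
// loop in the hunk above.
int64_t feed_capacity(const std::vector<int32_t>& shape) {
  int64_t capacity = 1;
  for (int32_t dim : shape) {
    capacity *= dim;  // e.g. shape {128, 32} -> capacity 4096
  }
  return capacity;
}
```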
 feed_var {
   name: "words"
-  is_lod_tensor: false
+  is_lod_tensor: true
   feed_type: 0
-  feed_shape {
-    shape: -1
-  }
+  shape: -1
 }
 feed_var {
   name: "label"
   is_lod_tensor: false
   feed_type: 0
-  feed_shape {
-    shape: 1
-  }
+  shape: 1
 }
 fetch_var {
-  name: "mean_0.tmp_0"
-  fetch_shape {
-    shape: 1
-  }
+  name: "cost"
+  shape: 1
 }
 fetch_var {
-  name: "accuracy_0.tmp_0"
-  fetch_shape {
-    shape: 1
-  }
+  name: "acc"
+  shape: 1
 }
 fetch_var {
-  name: "fc_1.tmp_2"
-  fetch_shape {
-    shape: 1
-  }
+  name: "prediction"
+  shape: 2
 }
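The example config above reflects the new flat schema: `words` becomes a variable-length LoD tensor (`shape: -1`), and the fetch vars are renamed to the friendlier aliases `cost`, `acc`, and `prediction`. A sketch of loading such a prototxt with protobuf's standard text format, again assuming the `general_model_config.pb.h` header name (the repo's actual loading helper is not shown in this diff):

```cpp
#include <fstream>
#include <sstream>
#include <string>

#include <google/protobuf/text_format.h>

#include "general_model_config.pb.h"  // assumed name of the generated header

using baidu::paddle_serving::configure::GeneralModelConfig;

// Parse a prototxt config like the example above into GeneralModelConfig.
bool load_general_model_config(const std::string& path,
                               GeneralModelConfig* conf) {
  std::ifstream in(path);
  if (!in) return false;
  std::stringstream buf;
  buf << in.rdbuf();
  return google::protobuf::TextFormat::ParseFromString(buf.str(), conf);
}
```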
@@ -48,16 +48,6 @@ int GeneralModelOp::inference() {
   std::vector<int> elem_size;
   std::vector<int> capacity;
   // config
-  LOG(INFO) << "start to call load general model_conf op";
-  baidu::paddle_serving::predictor::Resource &resource =
-      baidu::paddle_serving::predictor::Resource::instance();
-  LOG(INFO) << "get resource pointer done.";
-  std::shared_ptr<PaddleGeneralModelConfig> model_config =
-      resource.get_general_model_config();
-  LOG(INFO) << "get general model config pointer done.";
-  resource.print_general_model_config(model_config);
   // infer
   if (batch_size > 0) {
     int var_num = req->insts(0).tensor_array_size();
......