Commit e7dabd0b authored by MRXLT, committed by GitHub

Merge pull request #145 from guru4elephant/fix_general_infer_name

fix general op reader problem, make fetch var changeable during infere…
@@ -25,7 +25,8 @@ message FeedVar {
 message FetchVar {
   optional string name = 1;
   optional string alias_name = 2;
-  repeated int32 shape = 3;
+  optional bool is_lod_tensor = 3 [ default = false ];
+  repeated int32 shape = 4;
 }
 message GeneralModelConfig {
   repeated FeedVar feed_var = 1;
...
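The new is_lod_tensor flag marks fetch outputs whose length varies per sample. A minimal sketch of reading it through the C++ accessors protoc generates for the message above (surrounding namespace and config loading omitted; everything beyond the field names is illustrative):

// Accessor names follow from the proto above; the function itself is a sketch.
void log_fetch_kinds(const GeneralModelConfig &conf) {
  for (int i = 0; i < conf.fetch_var_size(); ++i) {
    if (conf.fetch_var(i).is_lod_tensor()) {
      // variable-length (LoD) output: its shape is recorded as a single -1
    } else {
      // dense output: shape lists the fixed per-sample dimensions
    }
  }
}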
@@ -108,7 +108,11 @@ std::vector<std::vector<float>> PredictorClient::predict(
   VLOG(2) << "fetch general model predictor done.";
   VLOG(2) << "float feed name size: " << float_feed_name.size();
   VLOG(2) << "int feed name size: " << int_feed_name.size();
+  VLOG(2) << "fetch name size: " << fetch_name.size();
   Request req;
+  for (auto &name : fetch_name) {
+    req.add_fetch_var_names(name);
+  }
   std::vector<Tensor *> tensor_vec;
   FeedInst *inst = req.add_insts();
   for (auto &name : float_feed_name) {
...
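Carrying fetch_var_names in the Request makes the fetched outputs a per-call choice instead of a fixed server-side list. A minimal sketch of what the new request population amounts to (the alias names are hypothetical; they must match fetch_var alias_name entries in the model config):

// Sketch only: "prob" and "pooled_out" are hypothetical alias names.
Request req;
req.add_fetch_var_names("prob");
req.add_fetch_var_names("pooled_out");
// ... feed tensors are then added via req.add_insts() as before ...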
@@ -30,8 +30,10 @@ namespace serving {
 using baidu::paddle_serving::predictor::MempoolWrapper;
 using baidu::paddle_serving::predictor::general_model::Tensor;
 using baidu::paddle_serving::predictor::general_model::Response;
+using baidu::paddle_serving::predictor::general_model::Request;
 using baidu::paddle_serving::predictor::general_model::FetchInst;
 using baidu::paddle_serving::predictor::InferManager;
+using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;

 int GeneralInferOp::inference() {
   const GeneralReaderOutput *reader_out =
@@ -57,40 +59,65 @@ int GeneralInferOp::inference() {
     return -1;
   }

+  const Request *req = dynamic_cast<const Request *>(get_request_message());
+
+  VLOG(2) << "start to call load general model_conf op";
+  baidu::paddle_serving::predictor::Resource &resource =
+      baidu::paddle_serving::predictor::Resource::instance();
+
+  VLOG(2) << "get resource pointer done.";
+  std::shared_ptr<PaddleGeneralModelConfig> model_config =
+      resource.get_general_model_config();
+
+  std::vector<int> fetch_index;
+  fetch_index.resize(req->fetch_var_names_size());
+  for (int i = 0; i < req->fetch_var_names_size(); ++i) {
+    fetch_index[i] =
+        model_config->_fetch_alias_name_to_index[req->fetch_var_names(i)];
+  }
+
+  // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();

   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
-    for (int j = 0; j < out->size(); ++j) {
+    for (auto &idx : fetch_index) {
       Tensor *tensor = fetch_inst->add_tensor_array();
+      // currently only response float tensor or lod_tensor
       tensor->set_elem_type(1);
-      if (out->at(j).lod.size() == 1) {
+      if (model_config->_is_lod_fetch[idx]) {
+        VLOG(2) << "out[" << idx << "] is lod_tensor";
         tensor->add_shape(-1);
       } else {
-        for (int k = 1; k < out->at(j).shape.size(); ++k) {
-          tensor->add_shape(out->at(j).shape[k]);
+        VLOG(2) << "out[" << idx << "] is tensor";
+        for (int k = 1; k < out->at(idx).shape.size(); ++k) {
+          VLOG(2) << "shape[" << k - 1 << "]: " << out->at(idx).shape[k];
+          tensor->add_shape(out->at(idx).shape[k]);
         }
       }
     }
   }

-  for (int i = 0; i < out->size(); ++i) {
-    float *data_ptr = static_cast<float *>(out->at(i).data.data());
+  int var_idx = 0;
+  for (auto &idx : fetch_index) {
+    float *data_ptr = static_cast<float *>(out->at(idx).data.data());
     int cap = 1;
-    for (int j = 1; j < out->at(i).shape.size(); ++j) {
-      cap *= out->at(i).shape[j];
+    for (int j = 1; j < out->at(idx).shape.size(); ++j) {
+      cap *= out->at(idx).shape[j];
     }
-    if (out->at(i).lod.size() == 1) {
+    if (model_config->_is_lod_fetch[idx]) {
       for (int j = 0; j < batch_size; ++j) {
-        for (int k = out->at(i).lod[0][j]; k < out->at(i).lod[0][j + 1]; k++) {
-          res->mutable_insts(j)->mutable_tensor_array(i)->add_data(
+        for (int k = out->at(idx).lod[0][j];
+             k < out->at(idx).lod[0][j + 1]; k++) {
+          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
               reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
         }
       }
     } else {
       for (int j = 0; j < batch_size; ++j) {
         for (int k = j * cap; k < (j + 1) * cap; ++k) {
-          res->mutable_insts(j)->mutable_tensor_array(i)->add_data(
+          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
               reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
         }
       }
...
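The response-packing loop above slices each output along the batch dimension in two ways: dense tensors by a fixed per-sample capacity cap, LoD tensors by the offsets stored in lod[0]. A self-contained toy sketch of the same indexing, outside the serving framework (data values and sizes invented for illustration):

#include <iostream>
#include <vector>

int main() {
  // Toy flat output buffer for a batch of 2 samples.
  std::vector<float> data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};
  int batch_size = 2;

  // Dense case: every sample owns `cap` consecutive floats.
  int cap = 3;
  for (int j = 0; j < batch_size; ++j)
    for (int k = j * cap; k < (j + 1) * cap; ++k)
      std::cout << "dense sample " << j << ": " << data[k] << "\n";

  // LoD case: lod[0] holds per-sample offsets, so sample j owns
  // [lod0[j], lod0[j + 1]) -- here 2 floats, then 4.
  std::vector<size_t> lod0 = {0, 2, 6};
  for (int j = 0; j < batch_size; ++j)
    for (size_t k = lod0[j]; k < lod0[j + 1]; ++k)
      std::cout << "lod sample " << j << ": " << data[k] << "\n";
  return 0;
}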
@@ -39,6 +39,9 @@ int conf_check(const Request *req,
     LOG(ERROR) << "feed var number not match.";
     return -1;
   }
+
+  VLOG(2) << "fetch var num in reader op: " << req->fetch_var_names_size();
+
   for (int i = 0; i < var_num; ++i) {
     if (model_config->_feed_type[i] !=
         req->insts(0).tensor_array(i).elem_type()) {
@@ -89,15 +92,15 @@ int GeneralReaderOp::inference() {
   VLOG(2) << "var num: " << var_num;

   // read config
-  LOG(INFO) << "start to call load general model_conf op";
+  VLOG(2) << "start to call load general model_conf op";
   baidu::paddle_serving::predictor::Resource &resource =
       baidu::paddle_serving::predictor::Resource::instance();
-  LOG(INFO) << "get resource pointer done.";
+  VLOG(2) << "get resource pointer done.";
   std::shared_ptr<PaddleGeneralModelConfig> model_config =
       resource.get_general_model_config();
-  LOG(INFO) << "print general model config done.";
+  VLOG(2) << "print general model config done.";

   // check
   res->reader_status = conf_check(req, model_config);
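Demoting these lines from LOG(INFO) to VLOG(2) moves per-request config chatter out of the default log; with glog-style logging they only appear when the server runs at verbosity 2 or higher (for example via the GLOG_v=2 environment variable).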
@@ -111,8 +114,8 @@ int GeneralReaderOp::inference() {
   elem_type.resize(var_num);
   elem_size.resize(var_num);
   capacity.resize(var_num);
-  paddle::PaddleTensor lod_tensor;
   for (int i = 0; i < var_num; ++i) {
+    paddle::PaddleTensor lod_tensor;
     elem_type[i] = req->insts(0).tensor_array(i).elem_type();
     VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
     if (elem_type[i] == 0) {  // int64
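Declaring the PaddleTensor inside the loop gives every feed variable a fresh tensor, so name, shape, and lod values set in one iteration can no longer leak into the next.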
@@ -138,11 +141,7 @@ int GeneralReaderOp::inference() {
       }
       VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
     }
-    if (i == 0) {
-      lod_tensor.name = "words";
-    } else {
-      lod_tensor.name = "label";
-    }
+    lod_tensor.name = model_config->_feed_name[i];
     in->push_back(lod_tensor);
   }
...
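The replaced branch hardcoded the feed names of one particular demo model ("words"/"label"); taking the name from the model config makes the reader work for arbitrary models, which appears to be the "general op reader problem" named in the commit title.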
@@ -35,6 +35,7 @@ message FetchInst {
 message Request {
   repeated FeedInst insts = 1;
+  repeated string fetch_var_names = 2;
 };

 message Response {
...
@@ -189,14 +189,18 @@ int Resource::general_model_initialize(const std::string& path,
   VLOG(2) << "load general model config";
   VLOG(2) << "feed var num: " << feed_var_num;
   _config->_feed_name.resize(feed_var_num);
+  _config->_feed_alias_name.resize(feed_var_num);
   _config->_feed_type.resize(feed_var_num);
   _config->_is_lod_feed.resize(feed_var_num);
   _config->_capacity.resize(feed_var_num);
   _config->_feed_shape.resize(feed_var_num);
   for (int i = 0; i < feed_var_num; ++i) {
     _config->_feed_name[i] = model_config.feed_var(i).name();
+    _config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
     VLOG(2) << "feed var[" << i << "]: "
             << _config->_feed_name[i];
+    VLOG(2) << "feed var alias[" << i << "]: "
+            << _config->_feed_alias_name[i];
     _config->_feed_type[i] = model_config.feed_var(i).feed_type();
     VLOG(2) << "feed type[" << i << "]: "
             << _config->_feed_type[i];
@@ -219,15 +223,27 @@ int Resource::general_model_initialize(const std::string& path,
   }

   int fetch_var_num = model_config.fetch_var_size();
+  _config->_is_lod_fetch.resize(fetch_var_num);
   _config->_fetch_name.resize(fetch_var_num);
+  _config->_fetch_alias_name.resize(fetch_var_num);
   _config->_fetch_shape.resize(fetch_var_num);
   for (int i = 0; i < fetch_var_num; ++i) {
     _config->_fetch_name[i] = model_config.fetch_var(i).name();
+    _config->_fetch_alias_name[i] = model_config.fetch_var(i).alias_name();
+    _config->_fetch_name_to_index[_config->_fetch_name[i]] = i;
+    _config->_fetch_alias_name_to_index[_config->_fetch_alias_name[i]] = i;
+    if (model_config.fetch_var(i).is_lod_tensor()) {
+      VLOG(2) << "fetch var[" << i << "] is lod tensor";
+      _config->_fetch_shape[i] = {-1};
+      _config->_is_lod_fetch[i] = true;
+    } else {
+      _config->_is_lod_fetch[i] = false;
       for (int j = 0; j < model_config.fetch_var(i).shape_size(); ++j) {
         int dim = model_config.fetch_var(i).shape(j);
         _config->_fetch_shape[i].push_back(dim);
       }
+    }
   }

   return 0;
 }
...
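With the two lookup maps filled in here, server ops can translate request-supplied alias names into output indices, as GeneralInferOp does above. A minimal sketch (the "prob" alias is hypothetical and would have to exist in the loaded config):

// Sketch; assumes general_model_initialize() has already run.
std::shared_ptr<PaddleGeneralModelConfig> model_config =
    resource.get_general_model_config();
int idx = model_config->_fetch_alias_name_to_index["prob"];  // hypothetical alias
bool is_lod = model_config->_is_lod_fetch[idx];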
@@ -16,6 +16,7 @@
 #include <memory>
 #include <string>
 #include <vector>
+#include <map>
 #include "core/cube/cube-api/include/cube_api.h"
 #include "core/kvdb/include/kvdb/paddle_rocksdb.h"
 #include "core/predictor/common/inner_common.h"
@@ -34,8 +35,10 @@ class PaddleGeneralModelConfig {
  public:
   std::vector<std::string> _feed_name;
+  std::vector<std::string> _feed_alias_name;
   std::vector<int> _feed_type;  // 0 int64, 1 float
   std::vector<bool> _is_lod_feed;  // true lod tensor
+  std::vector<bool> _is_lod_fetch;  // whether a fetch var is lod_tensor
   std::vector<int> _capacity;  // capacity for each tensor
   /*
     feed_shape_ for feeded variable
@@ -45,7 +48,10 @@ class PaddleGeneralModelConfig {
   std::vector<std::vector<int>> _feed_shape;

   std::vector<std::string> _fetch_name;
+  std::vector<std::string> _fetch_alias_name;
   std::vector<std::vector<int>> _fetch_shape;
+  std::map<std::string, int> _fetch_name_to_index;
+  std::map<std::string, int> _fetch_alias_name_to_index;
 };

 class BaseRdDict;
...
@@ -35,6 +35,7 @@ message FetchInst {
 message Request {
   repeated FeedInst insts = 1;
+  repeated string fetch_var_names = 2;
 };

 message Response {
...
@@ -41,7 +41,7 @@ def save_model(server_model_folder,
     feed_var = model_conf.FeedVar()
     feed_var.alias_name = key
     feed_var.name = feed_var_dict[key].name
-    feed_var.is_lod_tensor = feed_var_dict[key].lod_level == 1
+    feed_var.is_lod_tensor = feed_var_dict[key].lod_level >= 1
     if feed_var_dict[key].dtype == core.VarDesc.VarType.INT32 or \
        feed_var_dict[key].dtype == core.VarDesc.VarType.INT64:
         feed_var.feed_type = 0
@@ -61,7 +61,15 @@ def save_model(server_model_folder,
     fetch_var = model_conf.FetchVar()
     fetch_var.alias_name = key
     fetch_var.name = fetch_var_dict[key].name
-    fetch_var.shape.extend(fetch_var_dict[key].shape)
+    fetch_var.is_lod_tensor = fetch_var_dict[key].lod_level >= 1
+    if fetch_var.is_lod_tensor:
+        fetch_var.shape.extend([-1])
+    else:
+        tmp_shape = []
+        for v in fetch_var_dict[key].shape:
+            if v >= 0:
+                tmp_shape.append(v)
+        fetch_var.shape.extend(tmp_shape)
     config.fetch_var.extend([fetch_var])

     cmd = "mkdir -p {}".format(client_config_folder)
...
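So a dense fetch var whose Paddle shape is, say, [-1, 10] is saved with shape: 10 (negative batch dimensions are dropped), while any LoD fetch var is saved with shape: -1 regardless of its static shape.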