Unverified · Commit 2ba9f25f · authored by Dong Daxiang, committed by GitHub

Merge pull request #581 from MRXLT/lod-numpy

LoD tensor feed: support numpy array input
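This change lets the client feed a numpy array for a variable-length (LoD) input instead of a flattened Python list. A minimal usage sketch, assuming a running server at 127.0.0.1:9292 and a model whose LoD feed variable is named "words" with fetch variable "prediction" (the names, config path, and port are illustrative, not part of this PR):

import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# Before this PR a LoD feed had to be a flat Python list;
# a numpy array of per-sample ids now works as well.
words = np.array([8, 233, 52, 601], dtype=np.int64)
fetch_map = client.predict(feed={"words": words}, fetch=["prediction"])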
@@ -131,7 +131,7 @@ int GeneralReaderOp::inference() {
       lod_tensor.dtype = paddle::PaddleDType::FLOAT32;
     }
-    if (req->insts(0).tensor_array(i).shape(0) == -1) {
+    if (model_config->_is_lod_feed[i]) {
       lod_tensor.lod.resize(1);
       lod_tensor.lod[0].push_back(0);
       VLOG(2) << "var[" << i << "] is lod_tensor";
@@ -153,6 +153,7 @@ int GeneralReaderOp::inference() {
   // specify the memory needed for output tensor_vector
   for (int i = 0; i < var_num; ++i) {
     if (out->at(i).lod.size() == 1) {
+      int tensor_size = 0;
       for (int j = 0; j < batch_size; ++j) {
         const Tensor &tensor = req->insts(j).tensor_array(i);
         int data_len = 0;
@@ -162,15 +163,28 @@ int GeneralReaderOp::inference() {
           data_len = tensor.float_data_size();
         }
         VLOG(2) << "tensor size for var[" << i << "]: " << data_len;
+        tensor_size += data_len;
         int cur_len = out->at(i).lod[0].back();
         VLOG(2) << "current len: " << cur_len;
-        out->at(i).lod[0].push_back(cur_len + data_len);
-        VLOG(2) << "new len: " << cur_len + data_len;
+        int sample_len = 0;
+        if (tensor.shape_size() == 1) {
+          sample_len = data_len;
+        } else {
+          sample_len = tensor.shape(0);
+        }
+        out->at(i).lod[0].push_back(cur_len + sample_len);
+        VLOG(2) << "new len: " << cur_len + sample_len;
       }
-      out->at(i).data.Resize(out->at(i).lod[0].back() * elem_size[i]);
-      out->at(i).shape = {out->at(i).lod[0].back(), 1};
+      out->at(i).data.Resize(tensor_size * elem_size[i]);
+      out->at(i).shape = {out->at(i).lod[0].back()};
+      for (int j = 1; j < req->insts(0).tensor_array(i).shape_size(); ++j) {
+        out->at(i).shape.push_back(req->insts(0).tensor_array(i).shape(j));
+      }
+      if (out->at(i).shape.size() == 1) {
+        out->at(i).shape.push_back(1);
+      }
       VLOG(2) << "var[" << i
               << "] is lod_tensor and len=" << out->at(i).lod[0].back();
     } else {
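In the reader op above, the lod offsets now advance by each sample's first dimension (tensor.shape(0)) rather than by its flattened element count, and the output shape keeps the trailing dimensions from the request, so multi-dimensional LoD inputs are no longer forced to shape {len, 1}. A small Python sketch of the same bookkeeping, using made-up per-sample shapes of (len_i, 3):

# Mirror of the new lod/shape bookkeeping (illustrative, not serving code).
samples = [[[1, 2, 3]] * 2, [[4, 5, 6]] * 5]   # sample lengths 2 and 5
lod = [0]
tensor_size = 0                          # total elements to allocate
for s in samples:
    data_len = sum(len(row) for row in s)      # flattened element count
    tensor_size += data_len
    sample_len = len(s)                        # tensor.shape(0) of this sample
    lod.append(lod[-1] + sample_len)           # advance by length, not size
shape = [lod[-1], 3]                     # first dim = total length, rest kept
assert lod == [0, 2, 7] and tensor_size == 21 and shape == [7, 3]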
@@ -15,8 +15,10 @@
 #include "core/general-server/op/general_response_op.h"
 #include <algorithm>
 #include <iostream>
+#include <map>
 #include <memory>
 #include <sstream>
+#include <utility>
 #include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -86,37 +88,51 @@ int GeneralResponseOp::inference() {
   // To get the order of model return values
   output->set_engine_name(pre_name);
   FetchInst *fetch_inst = output->add_insts();
+  std::map<std::string, int> fetch_index_map;
+  for (int i = 0; i < in->size(); ++i) {
+    VLOG(2) << "index " << i << " var " << in->at(i).name;
+    fetch_index_map.insert(std::pair<std::string, int>(in->at(i).name, i));
+  }
   for (auto &idx : fetch_index) {
     Tensor *tensor = fetch_inst->add_tensor_array();
     tensor->set_elem_type(1);
+    int true_idx = fetch_index_map[model_config->_fetch_name[idx]];
     if (model_config->_is_lod_fetch[idx]) {
-      VLOG(2) << "out[" << idx << "] is lod_tensor";
-      for (int k = 0; k < in->at(idx).shape.size(); ++k) {
-        VLOG(2) << "shape[" << k << "]: " << in->at(idx).shape[k];
-        tensor->add_shape(in->at(idx).shape[k]);
+      VLOG(2) << "out[" << idx << "] " << model_config->_fetch_name[idx]
+              << " is lod_tensor";
+      for (int k = 0; k < in->at(true_idx).shape.size(); ++k) {
+        VLOG(2) << "shape[" << k << "]: " << in->at(true_idx).shape[k];
+        tensor->add_shape(in->at(true_idx).shape[k]);
       }
     } else {
-      VLOG(2) << "out[" << idx << "] is tensor";
-      for (int k = 0; k < in->at(idx).shape.size(); ++k) {
-        VLOG(2) << "shape[" << k << "]: " << in->at(idx).shape[k];
-        tensor->add_shape(in->at(idx).shape[k]);
+      VLOG(2) << "out[" << idx << "] " << model_config->_fetch_name[idx]
+              << " is tensor";
+      for (int k = 0; k < in->at(true_idx).shape.size(); ++k) {
+        VLOG(2) << "shape[" << k << "]: " << in->at(true_idx).shape[k];
+        tensor->add_shape(in->at(true_idx).shape[k]);
      }
    }
  }
  int var_idx = 0;
  for (auto &idx : fetch_index) {
+   int true_idx = fetch_index_map[model_config->_fetch_name[idx]];
    int cap = 1;
-   for (int j = 0; j < in->at(idx).shape.size(); ++j) {
-     cap *= in->at(idx).shape[j];
+   for (int j = 0; j < in->at(true_idx).shape.size(); ++j) {
+     cap *= in->at(true_idx).shape[j];
    }
-   if (in->at(idx).dtype == paddle::PaddleDType::INT64) {
-     int64_t *data_ptr = static_cast<int64_t *>(in->at(idx).data.data());
+   if (in->at(true_idx).dtype == paddle::PaddleDType::INT64) {
+     VLOG(2) << "Prepare int64 var [" << model_config->_fetch_name[idx]
+             << "].";
+     int64_t *data_ptr =
+         static_cast<int64_t *>(in->at(true_idx).data.data());
      if (model_config->_is_lod_fetch[idx]) {
        FetchInst *fetch_p = output->mutable_insts(0);
-       for (int j = 0; j < in->at(idx).lod[0].size(); ++j) {
-         fetch_p->mutable_tensor_array(var_idx)->add_lod(
-             in->at(idx).lod[0][j]);
+       for (int j = 0; j < in->at(true_idx).lod[0].size(); ++j) {
+         fetch_p->mutable_tensor_array(var_idx)->add_lod(
+             in->at(true_idx).lod[0][j]);
        }
        for (int j = 0; j < cap; ++j) {
          fetch_p->mutable_tensor_array(var_idx)->add_int64_data(data_ptr[j]);
@@ -127,14 +143,17 @@ int GeneralResponseOp::inference() {
          fetch_p->mutable_tensor_array(var_idx)->add_int64_data(data_ptr[j]);
        }
      }
+     VLOG(2) << "fetch var [" << model_config->_fetch_name[idx] << "] ready";
      var_idx++;
-   } else if (in->at(idx).dtype == paddle::PaddleDType::FLOAT32) {
-     float *data_ptr = static_cast<float *>(in->at(idx).data.data());
+   } else if (in->at(true_idx).dtype == paddle::PaddleDType::FLOAT32) {
+     VLOG(2) << "Prepare float var [" << model_config->_fetch_name[idx]
+             << "].";
+     float *data_ptr = static_cast<float *>(in->at(true_idx).data.data());
      if (model_config->_is_lod_fetch[idx]) {
        FetchInst *fetch_p = output->mutable_insts(0);
-       for (int j = 0; j < in->at(idx).lod[0].size(); ++j) {
-         fetch_p->mutable_tensor_array(var_idx)->add_lod(
-             in->at(idx).lod[0][j]);
+       for (int j = 0; j < in->at(true_idx).lod[0].size(); ++j) {
+         fetch_p->mutable_tensor_array(var_idx)->add_lod(
+             in->at(true_idx).lod[0][j]);
        }
        for (int j = 0; j < cap; ++j) {
          fetch_p->mutable_tensor_array(var_idx)->add_float_data(data_ptr[j]);
@@ -145,6 +164,7 @@ int GeneralResponseOp::inference() {
          fetch_p->mutable_tensor_array(var_idx)->add_float_data(data_ptr[j]);
        }
      }
+     VLOG(2) << "fetch var [" << model_config->_fetch_name[idx] << "] ready";
      var_idx++;
    }
  }
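The response op above stops assuming that the inference engine returns outputs in the same order as the configured fetch names: fetch_index_map maps each returned variable name to its slot, and every read goes through true_idx. The same idea in a few lines of Python (names are illustrative):

# Engine output order may differ from the config order; resolve by name.
returned = ["prob", "pooled_output"]        # order from the engine
fetch_names = ["pooled_output", "prob"]     # order from the client config
fetch_index_map = {name: i for i, name in enumerate(returned)}
for name in fetch_names:
    true_idx = fetch_index_map[name]        # read the matching tensor
    print(name, "-> engine slot", true_idx)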
@@ -203,7 +203,12 @@ class Client(object):
     def shape_check(self, feed, key):
         if key in self.lod_tensor_set:
             return
-        if len(feed[key]) != self.feed_tensor_len[key]:
+        if isinstance(feed[key],
+                      list) and len(feed[key]) != self.feed_tensor_len[key]:
+            raise SystemExit("The shape of feed tensor {} not match.".format(
+                key))
+        if type(feed[key]).__module__ == np.__name__ and np.size(feed[
+                key]) != self.feed_tensor_len[key]:
             raise SystemExit("The shape of feed tensor {} not match.".format(
                 key))
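shape_check now validates both representations: a list by len(), a numpy array by np.size(), so an ndarray of any shape passes as long as its total element count matches the declared tensor length. A standalone illustration of the accepted inputs (not the client code itself):

import numpy as np

feed_tensor_len = 6                        # declared length from the config
assert len([0, 1, 2, 3, 4, 5]) == feed_tensor_len    # list feed
assert np.size(np.zeros((2, 3))) == feed_tensor_len  # ndarray feed, shape (2, 3)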
@@ -254,23 +259,16 @@ class Client(object):
             for key in feed_i:
                 if key not in self.feed_names_:
                     raise ValueError("Wrong feed name: {}.".format(key))
-                if not isinstance(feed_i[key], np.ndarray):
-                    self.shape_check(feed_i, key)
+                #if not isinstance(feed_i[key], np.ndarray):
+                self.shape_check(feed_i, key)
                 if self.feed_types_[key] == int_type:
                     if i == 0:
                         int_feed_names.append(key)
                         if isinstance(feed_i[key], np.ndarray):
-                            if key in self.lod_tensor_set:
-                                raise ValueError(
-                                    "LodTensor var can not be ndarray type.")
                             int_shape.append(list(feed_i[key].shape))
                         else:
                             int_shape.append(self.feed_shapes_[key])
                     if isinstance(feed_i[key], np.ndarray):
-                        if key in self.lod_tensor_set:
-                            raise ValueError(
-                                "LodTensor var can not be ndarray type.")
-                        #int_slot.append(np.reshape(feed_i[key], (-1)).tolist())
                         int_slot.append(feed_i[key])
                         self.has_numpy_input = True
                     else:
@@ -280,17 +278,10 @@ class Client(object):
                     if i == 0:
                         float_feed_names.append(key)
                         if isinstance(feed_i[key], np.ndarray):
-                            if key in self.lod_tensor_set:
-                                raise ValueError(
-                                    "LodTensor var can not be ndarray type.")
                             float_shape.append(list(feed_i[key].shape))
                         else:
                             float_shape.append(self.feed_shapes_[key])
                     if isinstance(feed_i[key], np.ndarray):
-                        if key in self.lod_tensor_set:
-                            raise ValueError(
-                                "LodTensor var can not be ndarray type.")
-                        #float_slot.append(np.reshape(feed_i[key], (-1)).tolist())
                         float_slot.append(feed_i[key])
                         self.has_numpy_input = True
                     else:
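With the "LodTensor var can not be ndarray type" guard removed, an ndarray feed for a LoD variable is accepted and its own shape (list(arr.shape)) is recorded per request, which is what allows variable-length batches. A sketch under the assumption that predict() accepts a list of feed dicts for batching, as the indexed loop above suggests (feed name is illustrative):

import numpy as np

# Each instance in the batch may carry its own length now that the client
# records the array's shape instead of rejecting ndarrays for LoD vars.
batch = [
    {"words": np.array([5, 9, 4], dtype=np.int64)},        # length 3
    {"words": np.array([7, 1, 8, 2, 6], dtype=np.int64)},  # length 5
]
# fetch_map = client.predict(feed=batch, fetch=["prediction"])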