Commit 5bb92dc8 authored by guru4elephant

add general response op for common response

Parent 79b00c9d
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle_inference_api.h" // NOLINT
#endif
#include <string>
namespace baidu {
namespace paddle_serving {
namespace serving {
static const char* GENERAL_MODEL_NAME = "general_model";

// Shared blob passed along the op graph: the model's input/output tensors
// plus per-request profiling info.
struct GeneralBlob {
  std::vector<paddle::PaddleTensor> tensor_vector;
  double infer_time;
  std::vector<std::string> fetch_name_vector;

  void Clear() {
    size_t tensor_count = tensor_vector.size();
    for (size_t ti = 0; ti < tensor_count; ++ti) {
      tensor_vector[ti].shape.clear();
    }
    tensor_vector.clear();
  }

  // Batch size of the first tensor: number of level-0 LoD segments for a
  // LoD tensor, shape[0] otherwise; -1 if the blob is empty.
  int GetBatchSize() const {
    if (tensor_vector.size() > 0) {
      if (tensor_vector[0].lod.size() == 1) {
        return tensor_vector[0].lod[0].size() - 1;
      } else {
        return tensor_vector[0].shape[0];
      }
    } else {
      return -1;
    }
  }

  std::string ShortDebugString() const { return "Not implemented!"; }
};
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
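For reference, a minimal standalone sketch (not part of this commit) of how the new GeneralBlob is meant to behave as the shared channel object: GetBatchSize() reads the batch dimension from shape[0] for a dense tensor and counts level-0 LoD segments for a variable-length tensor.

// Hedged usage sketch, assuming the header above is available as
// core/general-server/op/general_infer_helper.h.
#include <iostream>
#include "core/general-server/op/general_infer_helper.h"

int main() {
  using baidu::paddle_serving::serving::GeneralBlob;

  // Dense tensor: shape[0] is the batch dimension.
  GeneralBlob dense;
  paddle::PaddleTensor t0;
  t0.shape = {4, 10};  // batch of 4 instances, 10 features each
  dense.tensor_vector.push_back(t0);
  std::cout << dense.GetBatchSize() << std::endl;  // prints 4

  // LoD tensor: lod[0] = {0, 3, 5} marks two sequences of lengths 3 and 2,
  // so the batch size is lod[0].size() - 1 == 2.
  GeneralBlob var_len;
  paddle::PaddleTensor t1;
  t1.shape = {5, 1};
  t1.lod = {{0, 3, 5}};
  var_len.tensor_vector.push_back(t1);
  std::cout << var_len.GetBatchSize() << std::endl;  // prints 2
  return 0;
}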
@@ -16,8 +16,8 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/op/general_infer_op.h"
-#include "core/general-server/op/general_reader_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
@@ -37,23 +37,20 @@ using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;

 int GeneralInferOp::inference() {
-  const GeneralReaderOutput *reader_out =
-      get_depend_argument<GeneralReaderOutput>("general_reader_op");
-  if (!reader_out) {
-    LOG(ERROR) << "Failed mutable depended argument, op:"
-               << "general_reader_op";
-    return -1;
-  }
-
-  int reader_status = reader_out->reader_status;
-  if (reader_status != 0) {
-    LOG(ERROR) << "Read request wrong.";
-    return -1;
-  }
-
-  const TensorVector *in = &reader_out->tensor_vector;
+  const GeneralBlob *input_blob =
+      get_depend_argument<GeneralBlob>(pre_name());
+
+  if (!input_blob) {
+    LOG(ERROR) << "Failed mutable depended argument, op:"
+               << pre_name();
+    return -1;
+  }
+
+  const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = butil::get_object<TensorVector>();
-  int batch_size = (*in)[0].shape[0];
+  int batch_size = input_blob->GetBatchSize();
+  VLOG(2) << "infer batch size: " << batch_size;

   // infer
   Timer timeline;
   double infer_time = 0.0;
@@ -65,73 +62,6 @@ int GeneralInferOp::inference() {
   timeline.Pause();
   infer_time = timeline.ElapsedUS();

-  const Request *req = dynamic_cast<const Request *>(get_request_message());
-
-  VLOG(2) << "start to call load general model_conf op";
-  baidu::paddle_serving::predictor::Resource &resource =
-      baidu::paddle_serving::predictor::Resource::instance();
-
-  VLOG(2) << "get resource pointer done.";
-  std::shared_ptr<PaddleGeneralModelConfig> model_config =
-      resource.get_general_model_config();
-
-  std::vector<int> fetch_index;
-  fetch_index.resize(req->fetch_var_names_size());
-  for (int i = 0; i < req->fetch_var_names_size(); ++i) {
-    fetch_index[i] =
-        model_config->_fetch_alias_name_to_index[req->fetch_var_names(i)];
-  }
-
-  // response inst with only fetch_var_names
-  Response *res = mutable_data<Response>();
-
-  res->set_mean_infer_us(infer_time);
-
-  for (int i = 0; i < batch_size; ++i) {
-    FetchInst *fetch_inst = res->add_insts();
-    for (auto & idx : fetch_index) {
-      Tensor *tensor = fetch_inst->add_tensor_array();
-      // currently only response float tensor or lod_tensor
-      tensor->set_elem_type(1);
-      if (model_config->_is_lod_fetch[idx]) {
-        VLOG(2) << "out[" << idx << " is lod_tensor";
-        tensor->add_shape(-1);
-      } else {
-        VLOG(2) << "out[" << idx << "] is tensor";
-        for (int k = 1; k < out->at(idx).shape.size(); ++k) {
-          VLOG(2) << "shape[" << k - 1 << "]: "
-                  << out->at(idx).shape[k];
-          tensor->add_shape(out->at(idx).shape[k]);
-        }
-      }
-    }
-  }
-
-  int var_idx = 0;
-  for (auto & idx : fetch_index) {
-    float *data_ptr = static_cast<float *>(out->at(idx).data.data());
-    int cap = 1;
-    for (int j = 1; j < out->at(idx).shape.size(); ++j) {
-      cap *= out->at(idx).shape[j];
-    }
-    if (model_config->_is_lod_fetch[idx]) {
-      for (int j = 0; j < batch_size; ++j) {
-        for (int k = out->at(idx).lod[0][j];
-             k < out->at(idx).lod[0][j + 1]; k++) {
-          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
-              reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
-        }
-      }
-    } else {
-      for (int j = 0; j < batch_size; ++j) {
-        for (int k = j * cap; k < (j + 1) * cap; ++k) {
-          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
-              reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
-        }
-      }
-    }
-    var_idx++;
-  }
-
   return 0;
 }
 DEFINE_OP(GeneralInferOp);
......
@@ -13,6 +13,7 @@
 // limitations under the License.
 #pragma once
+#include <string>
 #include <vector>
 #ifdef BCLOUD
 #ifdef WITH_GPU
@@ -29,17 +30,15 @@ namespace baidu {
 namespace paddle_serving {
 namespace serving {

-static const char* GENERAL_MODEL_NAME = "general_model";
-
 class GeneralInferOp
-    : public baidu::paddle_serving::predictor::OpWithChannel<
-          baidu::paddle_serving::predictor::general_model::Response> {
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;

   DECLARE_OP(GeneralInferOp);

   int inference();
 };

 }  // namespace serving
......
@@ -16,6 +16,7 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/op/general_reader_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -77,16 +78,12 @@ int GeneralReaderOp::inference() {
   std::vector<int64_t> elem_size;
   std::vector<int64_t> capacity;

-  GeneralReaderOutput *res = mutable_data<GeneralReaderOutput>();
-  TensorVector *in = &res->tensor_vector;
+  GeneralBlob *res = mutable_data<GeneralBlob>();
+  TensorVector *out = &res->tensor_vector;

   if (!res) {
     LOG(ERROR) << "Failed get op tls reader object output";
   }

-  if (batch_size <= 0) {
-    res->reader_status = -1;
-    return 0;
-  }
-
   int var_num = req->insts(0).tensor_array_size();
   VLOG(2) << "var num: " << var_num;
@@ -102,7 +99,7 @@ int GeneralReaderOp::inference() {
   VLOG(2) << "print general model config done.";

-  // check
+  // TODO(guru4elephant): how to do conditional check?
   res->reader_status = conf_check(req, model_config);
   if (res->reader_status != 0) {
     LOG(INFO) << "model conf of server:";
@@ -142,26 +139,26 @@ int GeneralReaderOp::inference() {
       VLOG(2) << "var[" << i << "] is tensor, capacity: " << capacity[i];
     }
     lod_tensor.name = model_config->_feed_name[i];
-    in->push_back(lod_tensor);
+    out->push_back(lod_tensor);
   }

   for (int i = 0; i < var_num; ++i) {
-    if (in->at(i).lod.size() == 1) {
+    if (out->at(i).lod.size() == 1) {
       for (int j = 0; j < batch_size; ++j) {
         const Tensor &tensor = req->insts(j).tensor_array(i);
         int data_len = tensor.data_size();
         VLOG(2) << "tensor size for var[" << i << "]: " << tensor.data_size();
-        int cur_len = in->at(i).lod[0].back();
+        int cur_len = out->at(i).lod[0].back();
         VLOG(2) << "current len: " << cur_len;
-        in->at(i).lod[0].push_back(cur_len + data_len);
+        out->at(i).lod[0].push_back(cur_len + data_len);
         VLOG(2) << "new len: " << cur_len + data_len;
       }
-      in->at(i).data.Resize(in->at(i).lod[0].back() * elem_size[i]);
-      in->at(i).shape = {in->at(i).lod[0].back(), 1};
+      out->at(i).data.Resize(out->at(i).lod[0].back() * elem_size[i]);
+      out->at(i).shape = {out->at(i).lod[0].back(), 1};
       VLOG(2) << "var[" << i
-              << "] is lod_tensor and len=" << in->at(i).lod[0].back();
+              << "] is lod_tensor and len=" << out->at(i).lod[0].back();
     } else {
-      in->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
+      out->at(i).data.Resize(batch_size * capacity[i] * elem_size[i]);
       VLOG(2) << "var[" << i
               << "] is tensor and capacity=" << batch_size * capacity[i];
     }
@@ -169,29 +166,29 @@ int GeneralReaderOp::inference() {
   for (int i = 0; i < var_num; ++i) {
     if (elem_type[i] == 0) {
-      int64_t *dst_ptr = static_cast<int64_t *>(in->at(i).data.data());
+      int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
       int offset = 0;
       for (int j = 0; j < batch_size; ++j) {
         for (int k = 0; k < req->insts(j).tensor_array(i).data_size(); ++k) {
           dst_ptr[offset + k] =
               *(const int64_t *)req->insts(j).tensor_array(i).data(k).c_str();
         }
-        if (in->at(i).lod.size() == 1) {
-          offset = in->at(i).lod[0][j + 1];
+        if (out->at(i).lod.size() == 1) {
+          offset = out->at(i).lod[0][j + 1];
         } else {
           offset += capacity[i];
         }
       }
     } else {
-      float *dst_ptr = static_cast<float *>(in->at(i).data.data());
+      float *dst_ptr = static_cast<float *>(out->at(i).data.data());
       int offset = 0;
       for (int j = 0; j < batch_size; ++j) {
         for (int k = 0; k < req->insts(j).tensor_array(i).data_size(); ++k) {
           dst_ptr[offset + k] =
               *(const float *)req->insts(j).tensor_array(i).data(k).c_str();
         }
-        if (in->at(i).lod.size() == 1) {
-          offset = in->at(i).lod[0][j + 1];
+        if (out->at(i).lod.size() == 1) {
+          offset = out->at(i).lod[0][j + 1];
         } else {
           offset += capacity[i];
         }
......
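The LoD bookkeeping above is the core of how GeneralReaderOp flattens variable-length instances into one buffer: lod[0] starts at 0 and each instance appends its cumulative end offset, so instance j occupies the range [lod[0][j], lod[0][j + 1]) of the data. A small sketch of that offset construction (not part of the commit; BuildLod0 and its length vector are illustrative stand-ins for the per-instance data_size() values read from the request):

// Hedged sketch of the level-0 LoD offsets that the reader builds, assuming
// per-instance data lengths taken from the request tensors.
#include <cstddef>
#include <vector>

std::vector<size_t> BuildLod0(const std::vector<int> &instance_lengths) {
  std::vector<size_t> lod0 = {0};  // lod[0] always begins at offset 0
  for (int len : instance_lengths) {
    lod0.push_back(lod0.back() + static_cast<size_t>(len));
  }
  return lod0;
}

// Example: lengths {3, 2, 4} give lod0 {0, 3, 5, 9}; the flattened buffer holds
// 9 elements and GeneralBlob::GetBatchSize() would report lod0.size() - 1 == 3.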
@@ -25,6 +25,7 @@
 #endif
 #include <string>
 #include "core/predictor/framework/resource.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
@@ -32,28 +33,15 @@ namespace baidu {
 namespace paddle_serving {
 namespace serving {

-struct GeneralReaderOutput {
-  std::vector<paddle::PaddleTensor> tensor_vector;
-  int reader_status = 0;
-
-  void Clear() {
-    size_t tensor_count = tensor_vector.size();
-    for (size_t ti = 0; ti < tensor_count; ++ti) {
-      tensor_vector[ti].shape.clear();
-    }
-    tensor_vector.clear();
-  }
-
-  std::string ShortDebugString() const { return "Not implemented!"; }
-};
-
 class GeneralReaderOp : public baidu::paddle_serving::predictor::OpWithChannel<
-                            GeneralReaderOutput> {
+                            GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;

   DECLARE_OP(GeneralReaderOp);

   int inference();
 };

 }  // namespace serving
......
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
#include "core/general-server/op/general_infer_helper.h"
#include "core/general-server/op/general_response_op.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/util/include/timer.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::general_model::Tensor;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::FetchInst;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
int GeneralResponseOp::inference() {
  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());

  if (!input_blob) {
    LOG(ERROR) << "Failed mutable depended argument, op: " << pre_name();
    return -1;
  }

  const TensorVector *in = &input_blob->tensor_vector;
  int batch_size = input_blob->GetBatchSize();
  double infer_time = input_blob->infer_time;

  VLOG(2) << "input batch size: " << batch_size;

  const Request *req = dynamic_cast<const Request *>(get_request_message());

  VLOG(2) << "start to call load general model_conf op";
  baidu::paddle_serving::predictor::Resource &resource =
      baidu::paddle_serving::predictor::Resource::instance();

  VLOG(2) << "get resource pointer done.";
  std::shared_ptr<PaddleGeneralModelConfig> model_config =
      resource.get_general_model_config();

  std::vector<int> fetch_index;
  fetch_index.resize(req->fetch_var_names_size());
  for (int i = 0; i < req->fetch_var_names_size(); ++i) {
    fetch_index[i] =
        model_config->_fetch_alias_name_to_index[req->fetch_var_names(i)];
  }

  // response inst with only fetch_var_names
  Response *res = mutable_data<Response>();

  res->set_mean_infer_us(infer_time);

  for (int i = 0; i < batch_size; ++i) {
    FetchInst *fetch_inst = res->add_insts();
    for (auto & idx : fetch_index) {
      Tensor *tensor = fetch_inst->add_tensor_array();
      // currently only response float tensor or lod_tensor
      tensor->set_elem_type(1);
      if (model_config->_is_lod_fetch[idx]) {
        VLOG(2) << "out[" << idx << "] is lod_tensor";
        tensor->add_shape(-1);
      } else {
        VLOG(2) << "out[" << idx << "] is tensor";
        for (int k = 1; k < in->at(idx).shape.size(); ++k) {
          VLOG(2) << "shape[" << k - 1 << "]: " << in->at(idx).shape[k];
          tensor->add_shape(in->at(idx).shape[k]);
        }
      }
    }
  }

  // Copy fetched data into the response: each float is appended as
  // sizeof(float) raw bytes per data element.
  int var_idx = 0;
  for (auto & idx : fetch_index) {
    float *data_ptr = static_cast<float *>(in->at(idx).data.data());
    int cap = 1;
    for (int j = 1; j < in->at(idx).shape.size(); ++j) {
      cap *= in->at(idx).shape[j];
    }
    if (model_config->_is_lod_fetch[idx]) {
      for (int j = 0; j < batch_size; ++j) {
        for (int k = in->at(idx).lod[0][j]; k < in->at(idx).lod[0][j + 1];
             k++) {
          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
              reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
        }
      }
    } else {
      for (int j = 0; j < batch_size; ++j) {
        for (int k = j * cap; k < (j + 1) * cap; ++k) {
          res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_data(
              reinterpret_cast<char *>(&(data_ptr[k])), sizeof(float));
        }
      }
    }
    var_idx++;
  }
  return 0;
}
DEFINE_OP(GeneralResponseOp);
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
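GeneralResponseOp packs each fetched float as sizeof(float) raw bytes via add_data(), so a consumer has to reinterpret those bytes on the other end. A hedged client-side sketch follows (not part of the commit; DecodePackedFloats and its std::string input are illustrative stand-ins for the repeated bytes field of the Tensor message):

// Hedged sketch: decoding the 4-byte chunks written by
// add_data(reinterpret_cast<char *>(&value), sizeof(float)). Assumes client
// and server share float representation and endianness, as the raw copy implies.
#include <cstring>
#include <string>
#include <vector>

std::vector<float> DecodePackedFloats(const std::vector<std::string> &chunks) {
  std::vector<float> values;
  values.reserve(chunks.size());
  for (const std::string &bytes : chunks) {
    if (bytes.size() != sizeof(float)) continue;  // skip malformed entries
    float v = 0.0f;
    std::memcpy(&v, bytes.data(), sizeof(float));
    values.push_back(v);
  }
  return values;
}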
@@ -13,6 +13,7 @@
 // limitations under the License.
 #pragma once
+#include <string>
 #include <vector>
 #ifdef BCLOUD
 #ifdef WITH_GPU
@@ -29,15 +30,16 @@ namespace baidu {
 namespace paddle_serving {
 namespace serving {

-class GeneralTextInferOp
+class GeneralResponseOp
     : public baidu::paddle_serving::predictor::OpWithChannel<
           baidu::paddle_serving::predictor::general_model::Response> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;

-  DECLARE_OP(GeneralTextInferOp);
+  DECLARE_OP(GeneralResponseOp);

   int inference();
 };

 }  // namespace serving
......
@@ -17,6 +17,7 @@
 #include <memory>
 #include <sstream>
 #include "core/general-server/op/general_text_reader_op.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -42,7 +43,7 @@ int GeneralTextReaderOp::inference() {
   std::vector<int64_t> elem_size;
   std::vector<int64_t> capacity;

-  GeneralTextReaderOutput *res = mutable_data<GeneralTextReaderOutput>();
+  GeneralBlob *res = mutable_data<GeneralBlob>();
   TensorVector *in = &res->tensor_vector;

   if (!res) {
......
@@ -32,23 +32,8 @@ namespace baidu {
 namespace paddle_serving {
 namespace serving {

-struct GeneralTextReaderOutput {
-  std::vector<paddle::PaddleTensor> tensor_vector;
-  int reader_status = 0;
-
-  void Clear() {
-    size_t tensor_count = tensor_vector.size();
-    for (size_t ti = 0; ti < tensor_count; ++ti) {
-      tensor_vector[ti].shape.clear();
-    }
-    tensor_vector.clear();
-  }
-
-  std::string ShortDebugString() const { return "Not implemented!"; }
-};
-
 class GeneralTextReaderOp :
-    public baidu::paddle_serving::predictor::OpWithChannel<
-        GeneralTextReaderOutput> {
+    public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
  public:
   typedef std::vector<paddle::PaddleTensor> TensorVector;
......
@@ -16,10 +16,8 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
-#include "core/general-server/op/general_text_infer_op.h"
-#include "core/general-server/op/general_infer_op.h"
-#include "core/general-server/op/general_text_reader_op.h"
-#include "core/general-server/op/general_reader_op.h"
+#include "core/general-server/op/general_text_response_op.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
@@ -29,7 +27,6 @@ namespace baidu {
 namespace paddle_serving {
 namespace serving {

-using baidu::paddle_serving::serving::GENERAL_MODEL_NAME;
 using baidu::paddle_serving::Timer;
 using baidu::paddle_serving::predictor::MempoolWrapper;
 using baidu::paddle_serving::predictor::general_model::Tensor;
@@ -40,42 +37,20 @@ using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;

-int GeneralTextInferOp::inference() {
-  const GeneralTextReaderOutput *reader_out =
-      get_depend_argument<GeneralTextReaderOutput>(pre_name());
-
-  VLOG(2) << "Going to get previous node name: " << pre_name();
-
-  if (!reader_out) {
-    LOG(ERROR) << "Failed mutable depended argument, op:"
-               << "general_text_reader_op";
-    return -1;
-  }
-
-  int reader_status = reader_out->reader_status;
-  if (reader_status != 0) {
-    LOG(ERROR) << "Read request wrong.";
-    return -1;
-  }
-
-  const TensorVector *in = &reader_out->tensor_vector;
-  TensorVector *out = butil::get_object<TensorVector>();
-
-  int batch_size = 0;
-  if (in->at(0).lod.size() == 1) {
-    batch_size = in->at(0).lod[0].size() - 1;
-  } else {
-    batch_size = in->at(0).shape[0];
-  }
+int GeneralTextResponseOp::inference() {
+  const GeneralBlob *blob_input =
+      get_depend_argument<GeneralBlob>(pre_name());
+
+  if (!blob_input) {
+    LOG(ERROR) << "Failed mutable depended argument, op: "
+               << pre_name();
+    return -1;
+  }
+
+  const TensorVector *in = &blob_input->tensor_vector;
+  int batch_size = blob_input->GetBatchSize();

   VLOG(2) << "infer batch size: " << batch_size;

   // infer
-  Timer timeline;
-  double infer_time = 0.0;
-  timeline.Start();
-  if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) {
-    LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
-    return -1;
-  }
-  timeline.Pause();
-  infer_time = timeline.ElapsedUS();

   const Request *req = dynamic_cast<const Request *>(get_request_message());
@@ -110,10 +85,10 @@ int GeneralTextInferOp::inference() {
         tensor->add_shape(-1);
       } else {
         VLOG(2) << "out[" << idx << "] is tensor";
-        for (int k = 1; k < out->at(idx).shape.size(); ++k) {
+        for (int k = 1; k < in->at(idx).shape.size(); ++k) {
           VLOG(2) << "shape[" << k - 1 << "]: "
-                  << out->at(idx).shape[k];
-          tensor->add_shape(out->at(idx).shape[k]);
+                  << in->at(idx).shape[k];
+          tensor->add_shape(in->at(idx).shape[k]);
         }
       }
     }
@@ -121,15 +96,15 @@ int GeneralTextInferOp::inference() {
   int var_idx = 0;
   for (auto & idx : fetch_index) {
-    float *data_ptr = static_cast<float *>(out->at(idx).data.data());
+    float *data_ptr = static_cast<float *>(in->at(idx).data.data());
     int cap = 1;
-    for (int j = 1; j < out->at(idx).shape.size(); ++j) {
-      cap *= out->at(idx).shape[j];
+    for (int j = 1; j < in->at(idx).shape.size(); ++j) {
+      cap *= in->at(idx).shape[j];
     }
     if (model_config->_is_lod_fetch[idx]) {
       for (int j = 0; j < batch_size; ++j) {
-        for (int k = out->at(idx).lod[0][j];
-             k < out->at(idx).lod[0][j + 1]; k++) {
+        for (int k = in->at(idx).lod[0][j];
+             k < in->at(idx).lod[0][j + 1]; k++) {
           res->mutable_insts(j)->mutable_tensor_array(var_idx)->add_float_data(
               data_ptr[k]);
         }
@@ -146,7 +121,7 @@ int GeneralTextInferOp::inference() {
   }

   return 0;
 }
-DEFINE_OP(GeneralTextInferOp);
+DEFINE_OP(GeneralTextResponseOp);

 }  // namespace serving
 }  // namespace paddle_serving
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#ifdef BCLOUD
#ifdef WITH_GPU
#include "paddle/paddle_inference_api.h"
#else
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#endif
#else
#include "paddle_inference_api.h" // NOLINT
#endif
#include "core/general-server/general_model_service.pb.h"
namespace baidu {
namespace paddle_serving {
namespace serving {
class GeneralTextResponseOp
    : public baidu::paddle_serving::predictor::OpWithChannel<
          baidu::paddle_serving::predictor::general_model::Response> {
 public:
  typedef std::vector<paddle::PaddleTensor> TensorVector;

  DECLARE_OP(GeneralTextResponseOp);

  int inference();
};
} // namespace serving
} // namespace paddle_serving
} // namespace baidu