提交 c90fab43 编写于 作者: H HexToString

fix_bug

上级 53f5b807
......@@ -40,7 +40,7 @@ using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
using baidu::paddle_serving::predictor::CubeCache;
// DistKV Infer Op: seek cube and then call paddle inference
// op seq: general_reader-> dist_kv_infer -> general_response
// op seq: GeneralReaderOp-> dist_kv_infer -> general_response
int GeneralDistKVInferOp::inference() {
VLOG(2) << "Going to run inference";
const std::vector<std::string> pre_node_names = pre_names();
......@@ -186,9 +186,9 @@ int GeneralDistKVInferOp::inference() {
if (values.size() != keys.size() || values[0].buff.size() == 0) {
LOG(ERROR) << "cube value return null";
}
size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
// size_t EMBEDDING_SIZE = (values[0].buff.size() - 10) / sizeof(float);
//size_t EMBEDDING_SIZE = 9;
// size_t EMBEDDING_SIZE = 9;
TensorVector sparse_out;
sparse_out.resize(sparse_count);
TensorVector dense_out;
......@@ -241,7 +241,7 @@ int GeneralDistKVInferOp::inference() {
// The data generated by pslib has 10 bytes of information to be filtered
// out
memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size() );
memcpy(data_ptr, cur_val->buff.data(), cur_val->buff.size());
// VLOG(3) << keys[cube_val_idx] << ":" << data_ptr[0] << ", " <<
// data_ptr[1] << ", " <<data_ptr[2] << ", " <<data_ptr[3] << ", "
// <<data_ptr[4] << ", " <<data_ptr[5] << ", " <<data_ptr[6] << ", "
......
......@@ -45,13 +45,13 @@ from paddle_serving_server import OpGraphMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
read_op = op_maker.create('GeneralReaderOp')
cnn_infer_op = op_maker.create(
'general_infer', engine_name='cnn', inputs=[read_op])
'GeneralInferOp', engine_name='cnn', inputs=[read_op])
bow_infer_op = op_maker.create(
'general_infer', engine_name='bow', inputs=[read_op])
'GeneralInferOp', engine_name='bow', inputs=[read_op])
response_op = op_maker.create(
'general_response', inputs=[cnn_infer_op, bow_infer_op])
'GeneralResponseOp', inputs=[cnn_infer_op, bow_infer_op])
op_graph_maker = OpGraphMaker()
op_graph_maker.add_op(read_op)
......
......@@ -45,13 +45,13 @@ from paddle_serving_server import OpGraphMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
read_op = op_maker.create('GeneralReaderOp')
cnn_infer_op = op_maker.create(
'general_infer', engine_name='cnn', inputs=[read_op])
'GeneralInferOp', engine_name='cnn', inputs=[read_op])
bow_infer_op = op_maker.create(
'general_infer', engine_name='bow', inputs=[read_op])
'GeneralInferOp', engine_name='bow', inputs=[read_op])
response_op = op_maker.create(
'general_response', inputs=[cnn_infer_op, bow_infer_op])
'GeneralResponseOp', inputs=[cnn_infer_op, bow_infer_op])
op_graph_maker = OpGraphMaker()
op_graph_maker.add_op(read_op)
......
......@@ -138,18 +138,21 @@ DEFINE_OP(GeneralInferOp);
``` python
self.op_dict = {
"general_infer": "GeneralInferOp",
"general_reader": "GeneralReaderOp",
"general_response": "GeneralResponseOp",
"general_text_reader": "GeneralTextReaderOp",
"general_text_response": "GeneralTextResponseOp",
"general_single_kv": "GeneralSingleKVOp",
"general_dist_kv": "GeneralDistKVOp"
}
self.op_list = [
"GeneralInferOp",
"GeneralReaderOp",
"GeneralResponseOp",
"GeneralTextReaderOp",
"GeneralTextResponseOp",
"GeneralSingleKVOp",
"GeneralDistKVInferOp",
"GeneralDistKVOp",
"GeneralCopyOp",
"GeneralDetectionOp",
]
```
`python/paddle_serving_server/server.py`文件中仅添加`需要加载模型,执行推理预测的自定义的C++OP类的类名`。例如`general_reader`由于只是做一些简单的数据处理而不加载模型调用预测,故在👆的代码中需要添加,而不添加在👇的代码中。
`python/paddle_serving_server/server.py`文件中仅添加`需要加载模型,执行推理预测的自定义的C++OP类的类名`。例如`GeneralReaderOp`由于只是做一些简单的数据处理而不加载模型调用预测,故在👆的代码中需要添加,而不添加在👇的代码中。
``` python
default_engine_types = [
'GeneralInferOp',
......
......@@ -136,20 +136,23 @@ After you have defined a C++ operator on server side for Paddle Serving, the las
``` python
self.op_dict = {
"general_infer": "GeneralInferOp",
"general_reader": "GeneralReaderOp",
"general_response": "GeneralResponseOp",
"general_text_reader": "GeneralTextReaderOp",
"general_text_response": "GeneralTextResponseOp",
"general_single_kv": "GeneralSingleKVOp",
"general_dist_kv": "GeneralDistKVOp"
}
self.op_list = [
"GeneralInferOp",
"GeneralReaderOp",
"GeneralResponseOp",
"GeneralTextReaderOp",
"GeneralTextResponseOp",
"GeneralSingleKVOp",
"GeneralDistKVInferOp",
"GeneralDistKVOp",
"GeneralCopyOp",
"GeneralDetectionOp",
]
```
In `python/paddle_serving_server/server.py` file, only the class name of the C++ OP class that needs to load the model and execute prediction is added.
For example, `general_reader`, need to be added in the 👆 code, but not in the 👇 code. Because it only does some simple data processing without loading the model and call prediction.
For example, `GeneralReaderOp` needs to be added in the 👆 code, but not in the 👇 code, because it only does some simple data processing without loading a model or calling prediction.
``` python
default_engine_types = [
'GeneralInferOp',
......
......@@ -19,9 +19,9 @@ from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
read_op = op_maker.create('GeneralReaderOp')
general_infer_op = op_maker.create('GeneralInferOp')
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
......
......@@ -19,9 +19,9 @@ from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
read_op = op_maker.create('GeneralReaderOp')
general_infer_op = op_maker.create('GeneralInferOp')
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
......
......@@ -20,14 +20,14 @@ from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
response_op = op_maker.create('general_response')
read_op = op_maker.create('GeneralReaderOp')
general_infer_op = op_maker.create('GeneralInferOp')
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(response_op)
op_seq_maker.add_op(general_response_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
......
......@@ -189,12 +189,11 @@ class LocalServiceHandler(object):
"mkldnn_bf16_op_list:{}, use_ascend_cl:{}, min_subgraph_size:{},"
"is_set_dynamic_shape_info:{}".format(
model_config, self._device_name, self._use_gpu, self._use_trt,
self._use_lite, self._use_xpu, device_type, self._devices,
self._mem_optim, self._ir_optim, self._use_profile,
self._thread_num, self._client_type, self._fetch_names,
self._precision, self._use_calib, self._use_mkldnn,
self._mkldnn_cache_capacity, self._mkldnn_op_list,
self._mkldnn_bf16_op_list, self._use_ascend_cl,
self._use_lite, self._use_xpu, device_type, self._devices, self.
_mem_optim, self._ir_optim, self._use_profile, self._thread_num,
self._client_type, self._fetch_names, self._precision, self.
_use_calib, self._use_mkldnn, self._mkldnn_cache_capacity, self.
_mkldnn_op_list, self._mkldnn_bf16_op_list, self._use_ascend_cl,
self.min_subgraph_size, bool(len(self.dynamic_shape_info))))
def get_fetch_list(self):
......@@ -282,9 +281,9 @@ class LocalServiceHandler(object):
if self._device_name == "cpu":
from paddle_serving_server import OpMaker, OpSeqMaker, Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
read_op = op_maker.create('GeneralReaderOp')
general_infer_op = op_maker.create('GeneralInferOp')
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
......@@ -296,9 +295,9 @@ class LocalServiceHandler(object):
#gpu or arm
from paddle_serving_server import OpMaker, OpSeqMaker, Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
read_op = op_maker.create('GeneralReaderOp')
general_infer_op = op_maker.create('GeneralInferOp')
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册