Commit 13484bd1
Authored Feb 16, 2020 by guru4elephant

add design of general blob as data struct between op on server side

Parent: 5287b5dc
Showing 9 changed files with 18 additions and 19 deletions (+18 -19):

core/general-server/op/general_infer_helper.h       +2 -2
core/general-server/op/general_infer_op.cpp         +1 -2
core/general-server/op/general_infer_op.h           +1 -0
core/general-server/op/general_reader_op.cpp        +2 -2
core/general-server/op/general_response_op.cpp      +2 -3
core/general-server/op/general_text_reader_op.cpp   +2 -3
core/general-server/op/general_text_reader_op.h     +1 -0
core/general-server/op/general_text_response_op.cpp +6 -7
core/general-server/op/general_text_response_op.h   +1 -0
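Taken together, the hunks make GeneralBlob, defined in general_infer_helper.h, the common data structure handed between server-side ops: the helper include moves out of the op .cpp files into the op headers, batch size is queried from the blob instead of the raw tensor vector, and the reader_status / infer_time side channels disappear. Below is a minimal sketch of the struct as far as this diff shows it; the shape fallback, the empty-blob return value, and the outer baidu namespace are assumptions filled in for completeness, consistent with the other files in the diff.

// Sketch of GeneralBlob reconstructed from the hunks in this commit.
// paddle::PaddleTensor comes from paddle_inference_api.h (included by the
// op headers in this diff); lines marked "assumed" are not visible here.
#include <string>
#include <vector>

#include "paddle_inference_api.h"  // NOLINT

namespace baidu {  // assumed outer namespace, as in the other headers
namespace paddle_serving {
namespace serving {

struct GeneralBlob {
  std::vector<paddle::PaddleTensor> tensor_vector;

  void Clear() { tensor_vector.clear(); }

  // const so that ops holding a `const GeneralBlob *` can query it.
  int GetBatchSize() const {
    if (tensor_vector.size() > 0) {
      if (tensor_vector[0].lod.size() == 1) {
        // LoD tensor: lod[0] holds sequence offsets, so the number of
        // sequences (the batch) is one less than the offset count.
        return tensor_vector[0].lod[0].size() - 1;
      }
      return tensor_vector[0].shape[0];  // assumed: plain batch dimension
    }
    return -1;  // assumed: sentinel for an empty blob
  }

  std::string ShortDebugString() const { return "Not implemented!"; }
};

}  // namespace serving
}  // namespace paddle_serving
}  // namespace baidu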
core/general-server/op/general_infer_helper.h
@@ -45,7 +45,7 @@ struct GeneralBlob {
     tensor_vector.clear();
   }
 
-  int GetBatchSize() {
+  int GetBatchSize() const {
     if (tensor_vector.size() > 0) {
       if (tensor_vector[0].lod.size() == 1) {
         return tensor_vector[0].lod[0].size() - 1;
@@ -58,7 +58,7 @@ struct GeneralBlob {
   }
 
   std::string ShortDebugString() const { return "Not implemented!"; }
-}
+};
 
 }  // namespace serving
 }  // namespace paddle_serving
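Why the added const qualifier on GetBatchSize() matters: the ops in the later hunks hold the blob as `const GeneralBlob *input_blob` (fetched via get_depend_argument<GeneralBlob>(pre_name())), and only const member functions are callable through a pointer-to-const. A hypothetical, self-contained illustration (not Serving code):

// Mini-example: a const member function is required once callers hold a
// const pointer, which is exactly the situation in the response ops below.
#include <vector>

struct Blob {
  std::vector<int> v;
  int GetBatchSize() const { return static_cast<int>(v.size()); }
};

int use(const Blob *b) {
  return b->GetBatchSize();  // would not compile without the const qualifier
}

int main() {
  Blob b{{1, 2, 3}};
  return use(&b) == 3 ? 0 : 1;
}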
core/general-server/op/general_infer_op.cpp
@@ -16,7 +16,6 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/op/general_infer_op.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -48,7 +47,7 @@ int GeneralInferOp::inference() {
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = butil::get_object<TensorVector>();
-  int batch_size = in->GetBatchSize();
+  int batch_size = input_blob->GetBatchSize();
   VLOG(2) << "infer batch size: " << batch_size;
 
   // infer
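Note on the batch-size fix in this hunk: `in` points at the blob's tensor_vector member, which under the GeneralBlob design is a plain vector of tensors with no batch-size method of its own; the GetBatchSize() logic added in general_infer_helper.h lives on GeneralBlob itself, so the call site moves from in->GetBatchSize() to input_blob->GetBatchSize(). The same correction is applied in general_response_op.cpp and general_text_response_op.cpp below.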
core/general-server/op/general_infer_op.h
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 
 namespace baidu {
 namespace paddle_serving {
core/general-server/op/general_reader_op.cpp
@@ -100,8 +100,8 @@ int GeneralReaderOp::inference() {
   VLOG(2) << "print general model config done.";
 
   // TODO(guru4elephant): how to do conditional check?
-  res->reader_status = conf_check(req, model_config);
-  if (res->reader_status != 0) {
+  int ret = conf_check(req, model_config);
+  if (ret != 0) {
     LOG(INFO) << "model conf of server:";
     resource.print_general_model_config(model_config);
     return 0;
core/general-server/op/general_response_op.cpp
@@ -47,8 +47,7 @@ int GeneralResponseOp::inference() {
   }
 
   const TensorVector *in = &input_blob->tensor_vector;
-  int batch_size = in->GetBatchSize();
-  double infer_time = in->infer_time;
+  int batch_size = input_blob->GetBatchSize();
 
   VLOG(2) << "input batch size: " << batch_size;
@@ -72,7 +71,7 @@ int GeneralResponseOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
 
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
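The removed `double infer_time = in->infer_time;` read a timing field that the tensor vector no longer carries under the GeneralBlob design, so `res->set_mean_infer_us(infer_time)` is commented out rather than fed an undefined value. The same commenting-out appears in general_text_response_op.cpp below.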
core/general-server/op/general_text_reader_op.cpp
@@ -17,7 +17,6 @@
 #include <memory>
 #include <sstream>
 #include "core/general-server/op/general_text_reader_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
@@ -51,8 +50,8 @@ int GeneralTextReaderOp::inference() {
   }
 
   if (batch_size <= 0) {
-    res->reader_status = -1;
-    return 0;
+    LOG(ERROR) << "Batch size < 0";
+    return -1;
   }
 
   int var_num = req->insts(0).tensor_array_size();
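The failure path changes semantics here: previously a non-positive batch size set `res->reader_status = -1` but returned 0, signaling success to the framework; now the op logs the error and returns -1 so the failure propagates. This pairs with the general_reader_op.cpp hunk above, where `res->reader_status` gives way to a local `ret`, removing the status side channel from the response message.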
core/general-server/op/general_text_reader_op.h
@@ -25,6 +25,7 @@
 #endif
 #include <string>
 #include "core/predictor/framework/resource.h"
+#include "core/general-server/op/general_infer_helper.h"
 #include "core/general-server/general_model_service.pb.h"
 #include "core/general-server/load_general_model_service.pb.h"
core/general-server/op/general_text_response_op.cpp
@@ -17,7 +17,6 @@
 #include <memory>
 #include <sstream>
 #include "core/general-server/op/general_text_response_op.h"
-#include "core/general-server/op/general_infer_helper.h"
 #include "core/predictor/framework/infer.h"
 #include "core/predictor/framework/memory.h"
 #include "core/predictor/framework/resource.h"
@@ -36,18 +35,18 @@ using baidu::paddle_serving::predictor::general_model::FetchInst;
 using baidu::paddle_serving::predictor::InferManager;
 using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
 
-int GeneralTextInferOp::inference() {
-  const GeneralBlob *blob_input =
+int GeneralTextResponseOp::inference() {
+  const GeneralBlob *input_blob =
       get_depend_argument<GeneralBlob>(pre_name());
 
-  if (!blob_input) {
+  if (!input_blob) {
     LOG(ERROR) << "Failed mutable depended argument, op: " << pre_name();
     return -1;
   }
 
-  const TensorVector *in = &blob_input->tensor_vector;
-  int batch_size = in->GetBatchSize();
+  const TensorVector *in = &input_blob->tensor_vector;
+  int batch_size = input_blob->GetBatchSize();
 
   VLOG(2) << "infer batch size: " << batch_size;
 
   // infer
@@ -72,7 +71,7 @@ int GeneralTextInferOp::inference() {
   // response inst with only fetch_var_names
   Response *res = mutable_data<Response>();
 
-  res->set_mean_infer_us(infer_time);
+  // res->set_mean_infer_us(infer_time);
 
   for (int i = 0; i < batch_size; ++i) {
     FetchInst *fetch_inst = res->add_insts();
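Besides the blob_input to input_blob rename, the method's owning class changes from GeneralTextInferOp to GeneralTextResponseOp, bringing the implementation in line with the file name and with general_text_response_op.h below. The second hunk's `@@` context still printing GeneralTextInferOp is expected, since hunk headers quote the pre-image of the file.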
core/general-server/op/general_text_response_op.h
@@ -25,6 +25,7 @@
 #include "paddle_inference_api.h"  // NOLINT
 #endif
 #include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
 
 namespace baidu {
 namespace paddle_serving {