PaddlePaddle / Serving
Commit 84c3dc93
Authored March 12, 2021 by HexToString

fix 2.0 bug final

Parent: 9bd6b3f6
Showing 7 changed files with 46 additions and 67 deletions (+46 −67).
core/general-server/op/general_infer_op.cpp     +0  −3
core/general-server/op/general_reader_op.cpp    +0  −2
core/general-server/op/general_response_op.cpp  +2  −8
core/predictor/framework/infer.h                +39 −40
core/predictor/framework/infer_data.h           +5  −9
core/predictor/framework/service.cpp            +0  −4
core/predictor/op/op.cpp                        +0  −1
core/general-server/op/general_infer_op.cpp

@@ -80,12 +80,9 @@ int GeneralInferOp::inference() {
   }
   int64_t end = timeline.TimeStampUS();
-  std::cout << "GeneralInferOp ---ysl" << std::endl;
-  LOG(ERROR) << "GeneralInferOp ---ysl";
   CopyBlobInfo(input_blob, output_blob);
   AddBlobInfo(output_blob, start);
   AddBlobInfo(output_blob, end);
-  std::cout << "GeneralInferOp ---ysl222" << std::endl;
   return 0;
 }
 DEFINE_OP(GeneralInferOp);
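The profiling that survives this cleanup relies on paired microsecond stamps rather than console prints: start is taken earlier in inference(), end right after the compute, and both ride along on the output blob via AddBlobInfo. A minimal sketch of that pattern, using std::chrono and stand-ins for Serving's TimeStampUS/AddBlobInfo/blob types (the stand-in names are illustrative, not the real ones):

#include <chrono>
#include <cstdint>
#include <vector>

// Stand-in for the timeline's TimeStampUS(): microseconds since epoch.
static int64_t TimeStampUS() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

struct Blob {
  std::vector<int64_t> profile_stamps;  // stand-in for the blob's profile info
};

// Stand-in for AddBlobInfo(blob, stamp): record one timestamp on the blob.
static void AddBlobInfo(Blob* blob, int64_t stamp) {
  blob->profile_stamps.push_back(stamp);
}

int infer_like_op(Blob* output_blob) {
  int64_t start = TimeStampUS();
  // ... actual inference work would run here ...
  int64_t end = TimeStampUS();
  AddBlobInfo(output_blob, start);  // paired stamps: downstream profiling
  AddBlobInfo(output_blob, end);    // code can compute end - start per op
  return 0;
}

Downstream code can then diff consecutive stamps per op without any I/O on the request path.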
core/general-server/op/general_reader_op.cpp

@@ -244,8 +244,6 @@ int GeneralReaderOp::inference() {
   AddBlobInfo(res, end);
   VLOG(2) << "(logid=" << log_id << ") read data from client success";
-  LOG(ERROR) << "GeneralReaderOp ---ysl";
-  std::cout << "GeneralReaderOp ---ysl" << std::endl;
   return 0;
 }
 DEFINE_OP(GeneralReaderOp);
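The kept line shows the logging style the commit falls back to: VLOG(2) tagged with the request's log_id. Unlike the removed std::cout calls, glog verbose logs are filtered by a runtime verbosity level and stay off stdout on the serving hot path. A minimal sketch of those calls in isolation (the flag value and log_id are illustrative):

#include <glog/logging.h>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_v = 2;  // enable VLOG(2) and below; set to 0 in production to mute

  uint64_t log_id = 42;  // illustrative request id
  VLOG(2) << "(logid=" << log_id << ") read data from client success";
  LOG(ERROR) << "reserved for real failures, not debug traces";
  return 0;
}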
core/general-server/op/general_response_op.cpp

@@ -96,6 +96,7 @@ int GeneralResponseOp::inference() {
   for (auto &idx : fetch_index) {
     Tensor *tensor = fetch_inst->add_tensor_array();
+    //tensor->set_elem_type(1);
     if (model_config->_is_lod_fetch[idx]) {
       VLOG(2) << "(logid=" << log_id << ") out[" << idx << "] "
               << model_config->_fetch_name[idx] << " is lod_tensor";
@@ -124,7 +125,6 @@ int GeneralResponseOp::inference() {
   FetchInst *fetch_p = output->mutable_insts(0);
   auto dtype = in->at(idx).dtype;
   if (dtype == paddle::PaddleDType::INT64) {
     VLOG(2) << "(logid=" << log_id << ") Prepare int64 var ["
             << model_config->_fetch_name[idx] << "].";
@@ -141,15 +141,12 @@ int GeneralResponseOp::inference() {
             << model_config->_fetch_name[idx] << "].";
     float *data_ptr = static_cast<float *>(in->at(idx).data.data());
-    std::cout << " response op ---- for" << std::endl;
-    for (int k = 0; k < cap; ++k) {
-      std::cout << "i am ysl -response op-copy idx = " << k
-                << "num = " << *(data_ptr + k) << std::endl;
-    }
     google::protobuf::RepeatedField<float> tmp_data(data_ptr,
                                                     data_ptr + cap);
     fetch_p->mutable_tensor_array(var_idx)->mutable_float_data()->Swap(
         &tmp_data);
   } else if (dtype == paddle::PaddleDType::INT32) {
     VLOG(2) << "(logid=" << log_id << ")Prepare int32 var ["
             << model_config->_fetch_name[idx] << "].";
     int32_t *data_ptr = static_cast<int32_t *>(in->at(idx).data.data());
@@ -198,9 +195,6 @@ int GeneralResponseOp::inference() {
     res->add_profile_time(start);
     res->add_profile_time(end);
   }
-  std::cout << "GeneralResponseOp ---ysl" << std::endl;
-  LOG(ERROR) << "GeneralResponseOp ---ysl";
   return 0;
 }
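Worth noting in the kept lines above: the float results are handed to protobuf without an element-by-element loop. The raw buffer is wrapped in a temporary google::protobuf::RepeatedField built from the [data_ptr, data_ptr + cap) range, then Swap()ed into the message's float_data field, which exchanges internal buffers instead of copying. A self-contained sketch of the same pattern (the message field is faked with a bare RepeatedField; only the protobuf calls themselves are real):

#include <google/protobuf/repeated_field.h>
#include <iostream>
#include <vector>

int main() {
  // Pretend this is the predictor's raw float output.
  std::vector<float> raw = {0.1f, 0.2f, 0.7f};
  float* data_ptr = raw.data();
  const int cap = static_cast<int>(raw.size());

  // Build a RepeatedField straight from the [begin, end) range.
  google::protobuf::RepeatedField<float> tmp_data(data_ptr, data_ptr + cap);

  // Stand-in for fetch_p->mutable_tensor_array(var_idx)->mutable_float_data().
  google::protobuf::RepeatedField<float> float_data;

  // Swap exchanges the two fields' internal buffers, so moving the data
  // into the message costs O(1) once tmp_data is populated.
  float_data.Swap(&tmp_data);

  for (float v : float_data) std::cout << v << " ";
  std::cout << std::endl;  // prints: 0.1 0.2 0.7
  return 0;
}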
core/predictor/framework/infer.h

@@ -24,7 +24,7 @@
 #include "core/predictor/framework/bsf.h"
 #include "core/predictor/framework/factory.h"
 #include "core/predictor/framework/infer_data.h"
-//
 #include "paddle_inference_api.h"  // NOLINT

 namespace baidu {
 namespace paddle_serving {
 namespace predictor {
@@ -593,9 +593,8 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
  public:  // NOLINT
   FluidInferEngine() {}
   ~FluidInferEngine() {}
+  typedef std::vector<paddle::PaddleTensor> TensorVector;
   int infer_impl1(const void* in, void* out, uint32_t batch_size = -1) {
-    LOG(ERROR) << "come in infer_impl1 ---ysl";
     FluidFamilyCore* core = DBReloadableInferEngine<FluidFamilyCore>::get_core();
     if (!core || !core->get()) {
       LOG(ERROR) << "Failed get fluid core in infer_impl()";
@@ -603,33 +602,29 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
     }
     //set inputHandle
-    const BatchTensor* batchTensor_pointer_in = reinterpret_cast<const BatchTensor*>(in);
-    std::cout << "input tensor: " << batchTensor_pointer_in->count() << std::endl;
-    for (int i = 0; i < batchTensor_pointer_in->count(); ++i) {
-      Tensor tensor_in_batchTensor = (*batchTensor_pointer_in)[i];
-      auto lod_tensor_in = core->GetInputHandle(tensor_in_batchTensor.name);
-      lod_tensor_in->SetLoD(tensor_in_batchTensor.lod);
-      lod_tensor_in->Reshape(tensor_in_batchTensor.shape);
-      void* origin_data = tensor_in_batchTensor.data.data();
-      if (tensor_in_batchTensor.type == FLOAT32) {
-        float* data = reinterpret_cast<float*>(origin_data);
+    const TensorVector* tensorVector_in_pointer = reinterpret_cast<const TensorVector*>(in);
+    for (int i = 0; i < tensorVector_in_pointer->size(); ++i) {
+      auto lod_tensor_in = core->GetInputHandle((*tensorVector_in_pointer)[i].name);
+      lod_tensor_in->SetLoD((*tensorVector_in_pointer)[i].lod);
+      lod_tensor_in->Reshape((*tensorVector_in_pointer)[i].shape);
+      void* origin_data = (*tensorVector_in_pointer)[i].data.data();
+      if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::FLOAT32) {
+        float* data = static_cast<float*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
-      } else if (tensor_in_batchTensor.type == INT64) {
-        int64_t* data = reinterpret_cast<int64_t*>(origin_data);
+      } else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT64) {
+        int64_t* data = static_cast<int64_t*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
-      }
-      /*else if(tensor_in_batchTensor.type == INT32){
-        int32_t* data = reinterpret_cast<int32_t*>(origin_data);
+      } else if ((*tensorVector_in_pointer)[i].dtype == paddle::PaddleDType::INT32) {
+        int32_t* data = static_cast<int32_t*>(origin_data);
         lod_tensor_in->CopyFromCpu(data);
-      }
-      */
+      }
     }
     if (!core->Run()) {
       LOG(ERROR) << "Failed run fluid family core";
       return -1;
     }
-    LOG(ERROR) << "Run infer_impl1 ---ysl";
     //get out and copy to void* out
-    BatchTensor* batchTensor_pointer_out = reinterpret_cast<BatchTensor*>(out);
-    LOG(ERROR) << "reinterpret_cast infer_impl1 ---ysl";
+    TensorVector* tensorVector_out_pointer = reinterpret_cast<TensorVector*>(out);
     std::vector<std::string> outnames = core->GetOutputNames();
     for (int i = 0; i < outnames.size(); ++i) {
       auto lod_tensor_out = core->GetOutputHandle(outnames[i]);
@@ -638,32 +633,26 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
       int dataType = lod_tensor_out->type();
       char* databuf_data = NULL;
       size_t databuf_size = 0;
-      if (dataType == FLOAT32) {
+      if (dataType == paddle::PaddleDType::FLOAT32) {
         float* data_out = new float[out_num];
         lod_tensor_out->CopyToCpu(data_out);
-        for ( int j = 0; j < out_num; j++ )
-        {std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
         databuf_data = reinterpret_cast<char*>(data_out);
         databuf_size = out_num * sizeof(float);
-      } else if (dataType == INT64) {
+      } else if (dataType == paddle::PaddleDType::INT64) {
         int64_t* data_out = new int64_t[out_num];
         lod_tensor_out->CopyToCpu(data_out);
-        for ( int j = 0; j < out_num; j++ )
-        {std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
         databuf_data = reinterpret_cast<char*>(data_out);
         databuf_size = out_num * sizeof(int64_t);
-      }
-      /*else (dataType == INT32){
+      } else if (dataType == paddle::PaddleDType::INT32) {
         int32_t* data_out = new int32_t[out_num];
         lod_tensor_out->CopyToCpu(data_out);
-        for ( int j = 0; j < out_num; j++ )
-        {std::cout << "ysl----data_out[+ " << j << "]) : ";std::cout << *(data_out + j) << std::endl;}
         databuf_data = reinterpret_cast<char*>(data_out);
         databuf_size = out_num * sizeof(int32_t);
-      }*/
-      Tensor* tensor_out = new Tensor();
+      }
+      /*
+      paddle::PaddleTensor* tensor_out = new paddle::PaddleTensor();
       tensor_out->name = outnames[i];
-      std::cout << "i am test ----outnames:" << outnames[i] << std::endl;
-      tensor_out->type = DataType(dataType);
+      tensor_out->dtype = paddle::PaddleDType(dataType);
       tensor_out->shape.assign(output_shape.begin(), output_shape.end());
       std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
       for (int li = 0; li < out_lod.size(); ++li) {
@@ -671,14 +660,24 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<FluidFamilyCore> {
         lod_element.assign(out_lod[li].begin(), out_lod[li].end());
         tensor_out->lod.push_back(lod_element);
       }
-      LOG(ERROR) << "DataBuf infer_impl1 ---ysl";
-      DataBuf* newData = new DataBuf(databuf_data, databuf_size, false);
+      paddle::PaddleBuf* newData = new paddle::PaddleBuf(databuf_data,databuf_size);
       tensor_out->data = *newData;
-      batchTensor_pointer_out->push_back(*tensor_out);
-      LOG(ERROR) << "push_back infer_impl1 ---ysl";
+      tensorVector_out_pointer->push_back(*tensor_out);
+      */
+      paddle::PaddleTensor tensor_out;
+      tensor_out.name = outnames[i];
+      tensor_out.dtype = paddle::PaddleDType(dataType);
+      tensor_out.shape.assign(output_shape.begin(), output_shape.end());
+      std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
+      for (int li = 0; li < out_lod.size(); ++li) {
+        std::vector<size_t> lod_element;
+        lod_element.assign(out_lod[li].begin(), out_lod[li].end());
+        tensor_out.lod.push_back(lod_element);
+      }
+      paddle::PaddleBuf paddleBuf(databuf_data, databuf_size);
+      tensor_out.data = paddleBuf;
+      tensorVector_out_pointer->push_back(tensor_out);
     }
-    LOG(ERROR) << "return infer_impl1 ---ysl";
-    std::cout << (*batchTensor_pointer_in)[0].shape.size()
-              << "(*batchTensor_pointer_in)[0].shape.size()" << std::endl;
     return 0;
   }
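This infer.h rewrite is the heart of the commit: infer_impl1 now moves data as std::vector<paddle::PaddleTensor> (the new TensorVector typedef) and drives the inference-2.0 handle API visible above: GetInputHandle / SetLoD / Reshape / CopyFromCpu, then Run, then GetOutputNames / GetOutputHandle / CopyToCpu. A condensed sketch of that call sequence against paddle_infer directly, outside Serving's engine wrappers (the model path and tensor shape are illustrative assumptions):

#include <paddle_inference_api.h>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  paddle_infer::Config config;
  config.SetModel("./model");  // illustrative model directory

  auto predictor = paddle_infer::CreatePredictor(config);

  // Feed one float32 input, mirroring the Reshape + CopyFromCpu sequence
  // that infer_impl1 performs for each incoming PaddleTensor.
  auto in_names = predictor->GetInputNames();
  auto input = predictor->GetInputHandle(in_names[0]);
  std::vector<int> shape = {1, 3, 224, 224};  // illustrative shape
  std::vector<float> data(1 * 3 * 224 * 224, 0.f);
  input->Reshape(shape);
  input->CopyFromCpu(data.data());

  predictor->Run();

  // Drain every output by name, as the loop over GetOutputNames() does.
  for (auto& name : predictor->GetOutputNames()) {
    auto output = predictor->GetOutputHandle(name);
    auto out_shape = output->shape();
    int out_num = std::accumulate(out_shape.begin(), out_shape.end(), 1,
                                  std::multiplies<int>());
    std::vector<float> out(out_num);
    output->CopyToCpu(out.data());
  }
  return 0;
}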
core/predictor/framework/infer_data.h

@@ -21,7 +21,7 @@ namespace baidu {
 namespace paddle_serving {
 namespace predictor {

-enum DataType { FLOAT32, INT64 };
+enum DataType { FLOAT32, INT64, INT32 };

 class DataBuf {
  public:
@@ -84,9 +84,11 @@ struct Tensor {
   size_t ele_byte() const {
     if (type == INT64) {
       return sizeof(int64_t);
-    } else {
+    } else if (type == FLOAT32) {
       return sizeof(float);
+    } else {
+      return sizeof(int32_t);
     }
   }

   bool valid() const {
@@ -146,12 +148,6 @@ class BatchTensor {
   void push_back(const Tensor& tensor) { _features.push_back(tensor); }

-  void push_back_owned(const Tensor& tensor) {
-    _features.push_back(tensor);
-    //change the DataBuf parameter "owned"= true
-    _features[count() - 1].data.set_owned(true);
-  }
-
   size_t count() const { return _features.size(); }

   size_t size() const {
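The deleted push_back_owned stored a copy of the tensor and then flipped the copy's DataBuf to owned, making that stored copy responsible for freeing the underlying buffer. For readers unfamiliar with the flag, here is a minimal illustration of the owned-buffer idea; this is a generic sketch of the pattern, not Serving's actual DataBuf:

#include <cstddef>
#include <cstring>

// Minimal sketch of a buffer that may or may not own its memory.
class OwnedBuf {
 public:
  OwnedBuf(char* data, size_t size, bool owned)
      : _data(data), _size(size), _owned(owned) {}

  // Copying is disabled so two objects can never both claim ownership.
  OwnedBuf(const OwnedBuf&) = delete;
  OwnedBuf& operator=(const OwnedBuf&) = delete;

  void set_owned(bool owned) { _owned = owned; }

  ~OwnedBuf() {
    // Only free when this object is the designated owner; a non-owning
    // view over someone else's buffer must leave it alone.
    if (_owned) delete[] _data;
  }

 private:
  char* _data;
  size_t _size;
  bool _owned;
};

int main() {
  char* heap = new char[16];
  std::memset(heap, 0, 16);
  OwnedBuf view(heap, 16, false);  // borrows: heap would outlive ~view
  view.set_owned(true);            // adopts: ~view now delete[]s heap
  return 0;
}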
core/predictor/framework/service.cpp

@@ -183,7 +183,6 @@ int InferService::inference(const google::protobuf::Message* request,
   VLOG(2) << "(logid=" << log_id << ") enable map request == False";
   TRACEPRINTF("(logid=%" PRIu64 ") start to execute one workflow", log_id);
   size_t fsize = _flows.size();
-  std::cout << "ysl--total workflow:" << fsize << std::endl;
   for (size_t fi = 0; fi < fsize; ++fi) {
     TRACEPRINTF(
         "(logid=%" PRIu64 ") start to execute one workflow-%lu", log_id, fi);
@@ -198,7 +197,6 @@ int InferService::inference(const google::protobuf::Message* request,
       }
     }
   }
-  std::cout << "ysl----InferService::inference finish" << std::endl;
   return ERR_OK;
 }
@@ -268,10 +266,8 @@ int InferService::_execute_workflow(Workflow* workflow,
       WORKFLOW_METRIC_PREFIX + dv->full_name(), workflow_time.u_elapsed());
   // return tls data to object pool
-  std::cout << "ysl ------- _execute_workflow------" << std::endl;
   workflow->return_dag_view(dv);
   TRACEPRINTF("(logid=%" PRIu64 ") finish to return dag view", log_id);
-  std::cout << "ysl ------- _execute_workflow return_dag_view------" << std::endl;
   return ERR_OK;
 }
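A side note on the TRACEPRINTF lines kept above: the odd-looking format string "(logid=%" PRIu64 ")" works by C string-literal splicing, since PRIu64 from <cinttypes> expands to the correct printf length specifier for uint64_t on each platform. A standalone demonstration:

#include <cinttypes>
#include <cstdio>

int main() {
  uint64_t log_id = 12345;
  size_t fi = 0;
  // PRIu64 is a string-literal macro ("llu" or "lu" depending on platform),
  // so adjacent-literal concatenation builds one valid format string.
  std::printf("(logid=%" PRIu64 ") start to execute one workflow-%zu\n",
              log_id, fi);
  return 0;
}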
core/predictor/op/op.cpp

@@ -163,7 +163,6 @@ int Op::process(const uint64_t log_id, bool debug) {
       OP_METRIC_PREFIX + full_name(), op_time.u_elapsed());
   LOG(INFO) << "(logid=" << log_id << ") " << name() << "_time=["
             << op_time.u_elapsed() << "]";
-  std::cout << "op process finish --ysl" << _name << std::endl;
   return ERR_OK;
 }