PaddlePaddle / Serving
Commit 0b92634d
Authored Apr 11, 2021 by HexToString

fix code style

Parent: 028b6a7f

Showing 11 changed files with 86 additions and 72 deletions (+86 -72)
CMakeLists.txt                                                     +10 -10
cmake/external/zlib.cmake                                           +1  -2
core/general-client/src/general_model.cpp                          +20  -6
core/general-server/op/general_infer_op.cpp                         +1  -4
core/general-server/op/general_reader_op.cpp                       +27 -26
core/predictor/framework/infer.h                                    +3  -3
core/predictor/framework/resource.cpp                               +6  -8
java/examples/src/main/java/PipelineClientExample.java              +4  -4
java/examples/src/main/java/StaticPipelineClient.java               +2  -2
java/src/main/java/io/paddle/serving/client/PipelineClient.java     +1  -1
python/paddle_serving_client/client.py                             +11  -6
CMakeLists.txt

@@ -37,6 +37,8 @@ if(NOT CMAKE_BUILD_TYPE)
       "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
       FORCE)
 endif()
+SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
+SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
     "A path setting third party libraries download & build directories.")

@@ -49,21 +51,19 @@ option(WITH_GPU "Compile Paddle Serving with NVIDIA GPU"
 option(WITH_LITE "Compile Paddle Serving with Paddle Lite Engine" OFF)
 option(WITH_XPU "Compile Paddle Serving with Baidu Kunlun" OFF)
 option(WITH_PYTHON "Compile Paddle Serving with Python" ON)
 option(CLIENT "Compile Paddle Serving Client" OFF)
 option(SERVER "Compile Paddle Serving Server" OFF)
 option(APP "Compile Paddle Serving App package" OFF)
 option(WITH_ELASTIC_CTR "Compile ELASITC-CTR solution" OFF)
 option(PACK "Compile for whl" OFF)
 option(WITH_TRT "Compile Paddle Serving with TRT" OFF)
 option(PADDLE_ON_INFERENCE "Compile for encryption" ON)
 option(WITH_OPENCV "Compile Paddle Serving with OPENCV" OFF)
-option(WITH_GDB "Compile Paddle Serving with GDB" OFF)
-if(WITH_GDB)
-  SET(CMAKE_BUILD_TYPE "Debug")
-  SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
-  SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
-endif()
 if(WITH_OPENCV)
     SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
cmake/external/zlib.cmake

@@ -54,8 +54,7 @@ ELSE(WIN32)
   SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
 ENDIF(WIN32)
-IF(WITH_OPENCV)
-ELSE()
+IF(NOT WITH_OPENCV)
   ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL)
 ENDIF()
core/general-client/src/general_model.cpp

@@ -64,7 +64,7 @@ int PredictorClient::init(const std::vector<std::string> &conf_file) {
            << ", file path: " << conf_file[0];
     return -1;
   }
   _feed_name_to_idx.clear();
   _fetch_name_to_idx.clear();
   _shape.clear();

@@ -88,7 +88,7 @@ int PredictorClient::init(const std::vector<std::string> &conf_file) {
     _shape.push_back(tmp_feed_shape);
   }
   if (conf_file.size() > 1) {
     model_config.Clear();
     if (configure::read_proto_conf(conf_file[conf_file.size() - 1].c_str(),
                                    &model_config) != 0) {
       LOG(ERROR) << "Failed to load general model config"

@@ -162,8 +162,8 @@ int PredictorClient::numpy_predict(
     PredictorRes &predict_res_batch,
     const int &pid,
     const uint64_t log_id) {
   int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
   batch_size = batch_size > string_feed_batch.size() ? batch_size : string_feed_batch.size();
   VLOG(2) << "batch size: " << batch_size;
   predict_res_batch.clear();
   Timer timeline;

@@ -186,6 +186,8 @@ int PredictorClient::numpy_predict(
     req.add_fetch_var_names(name);
   }
+  int vec_idx = 0;
   for (int bi = 0; bi < batch_size; bi++) {
     VLOG(2) << "prepare batch " << bi;
     std::vector<Tensor *> tensor_vec;

@@ -207,9 +209,13 @@ int PredictorClient::numpy_predict(
     VLOG(2) << "batch [" << bi << "] "
             << "prepared";
-    int vec_idx = 0;
+    vec_idx = 0;
     for (auto &name : float_feed_name) {
       int idx = _feed_name_to_idx[name];
+      if (idx >= tensor_vec.size()) {
+        LOG(ERROR) << "idx > tensor_vec.size()";
+        return -1;
+      }
       Tensor *tensor = tensor_vec[idx];
       VLOG(2) << "prepare float feed " << name << " shape size "
               << float_shape[vec_idx].size();

@@ -272,6 +278,10 @@ int PredictorClient::numpy_predict(
     vec_idx = 0;
     for (auto &name : int_feed_name) {
       int idx = _feed_name_to_idx[name];
+      if (idx >= tensor_vec.size()) {
+        LOG(ERROR) << "idx > tensor_vec.size()";
+        return -1;
+      }
       Tensor *tensor = tensor_vec[idx];
       for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) {

@@ -358,6 +368,10 @@ int PredictorClient::numpy_predict(
     vec_idx = 0;
     for (auto &name : string_feed_name) {
       int idx = _feed_name_to_idx[name];
+      if (idx >= tensor_vec.size()) {
+        LOG(ERROR) << "idx > tensor_vec.size()";
+        return -1;
+      }
       Tensor *tensor = tensor_vec[idx];
       for (uint32_t j = 0; j < string_shape[vec_idx].size(); ++j) {

@@ -371,7 +385,7 @@ int PredictorClient::numpy_predict(
       const int string_shape_size = string_shape[vec_idx].size();
       //string_shape[vec_idx] = [1];cause numpy has no datatype of string.
       //we pass string via vector<vector<string> >.
       if (string_shape_size != 1) {
         LOG(ERROR) << "string_shape_size should be 1-D, but received is : "
                    << string_shape_size;
         return -1;
       }
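The hunks above add the same defensive check before each feed loop dereferences tensor_vec[idx]. A minimal, self-contained C++ sketch of that guard pattern follows; the names and types here are illustrative stand-ins, not the actual Serving client classes:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Resolve a feed name to an index, then verify the index is inside
    // tensor_vec before using it, mirroring the "idx >= tensor_vec.size()"
    // guard added in numpy_predict above.
    static int prepare_feed(const std::map<std::string, int>& feed_name_to_idx,
                            const std::vector<int>& tensor_vec,
                            const std::string& name) {
      auto it = feed_name_to_idx.find(name);
      if (it == feed_name_to_idx.end()) {
        std::fprintf(stderr, "unknown feed name: %s\n", name.c_str());
        return -1;
      }
      size_t idx = static_cast<size_t>(it->second);
      if (idx >= tensor_vec.size()) {  // fail fast instead of indexing out of range
        std::fprintf(stderr, "idx > tensor_vec.size()\n");
        return -1;
      }
      std::printf("feed %s -> slot %zu (value %d)\n", name.c_str(), idx, tensor_vec[idx]);
      return 0;
    }

    int main() {
      std::map<std::string, int> feed_name_to_idx = {{"x", 0}, {"label", 5}};
      std::vector<int> tensor_vec = {42, 7};                // only two prepared slots
      prepare_feed(feed_name_to_idx, tensor_vec, "x");      // ok
      prepare_feed(feed_name_to_idx, tensor_vec, "label");  // rejected by the guard
      return 0;
    }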
core/general-server/op/general_infer_op.cpp

@@ -71,10 +71,7 @@ int GeneralInferOp::inference() {
   TensorVector *out = &output_blob->tensor_vector;
   int batch_size = input_blob->_batch_size;
-  VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
   output_blob->_batch_size = batch_size;
   VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
   Timer timeline;

@@ -97,4 +94,4 @@ DEFINE_OP(GeneralInferOp);
 }  // namespace serving
 }  // namespace paddle_serving
 }  // namespace baidu
\ No newline at end of file
core/general-server/op/general_reader_op.cpp

@@ -46,17 +46,18 @@ int conf_check(const Request *req,
   VLOG(2) << "fetch var num in reader op: " << req->fetch_var_names_size();
   for (int i = 0; i < var_num; ++i) {
+    const Tensor &tensor = req->insts(0).tensor_array(i);
     if (model_config->_feed_type[i] !=
-        req->insts(0).tensor_array(i).elem_type()) {
+        tensor.elem_type()) {
       LOG(ERROR) << "feed type not match.";
       return -1;
     }
     if (model_config->_feed_shape[i].size() ==
-        req->insts(0).tensor_array(i).shape_size()) {
+        tensor.shape_size()) {
       for (int j = 0; j < model_config->_feed_shape[i].size(); ++j) {
-        req->insts(0).tensor_array(i).shape(j);
+        tensor.shape(j);
         if (model_config->_feed_shape[i][j] !=
-            req->insts(0).tensor_array(i).shape(j)) {
+            tensor.shape(j)) {
           LOG(ERROR) << "feed shape not match.";
           return -1;
         }

@@ -124,7 +125,7 @@ int GeneralReaderOp::inference() {
     paddle::PaddleTensor lod_tensor;
     const Tensor &tensor = req->insts(0).tensor_array(i);
     data_len = 0;
-    elem_type[i] = req->insts(0).tensor_array(i).elem_type();
+    elem_type[i] = tensor.elem_type();
     VLOG(2) << "var[" << i << "] has elem type: " << elem_type[i];
     if (elem_type[i] == P_INT64) {  // int64
       elem_size[i] = sizeof(int64_t);

@@ -150,16 +151,16 @@ int GeneralReaderOp::inference() {
     // implement lod tensor here
     // only support 1-D lod
     // TODO:support 2-D lod
-    if (req->insts(0).tensor_array(i).lod_size() > 0) {
+    if (tensor.lod_size() > 0) {
       VLOG(2) << "(logid=" << log_id << ") var[" << i << "] is lod_tensor";
       lod_tensor.lod.resize(1);
-      for (int k = 0; k < req->insts(0).tensor_array(i).lod_size(); ++k) {
-        lod_tensor.lod[0].push_back(req->insts(0).tensor_array(i).lod(k));
+      for (int k = 0; k < tensor.lod_size(); ++k) {
+        lod_tensor.lod[0].push_back(tensor.lod(k));
       }
     }
-    for (int k = 0; k < req->insts(0).tensor_array(i).shape_size(); ++k) {
-      int dim = req->insts(0).tensor_array(i).shape(k);
+    for (int k = 0; k < tensor.shape_size(); ++k) {
+      int dim = tensor.shape(k);
       VLOG(2) << "(logid=" << log_id << ") shape for var[" << i
               << "]: " << dim;
       lod_tensor.shape.push_back(dim);

@@ -178,57 +179,57 @@ int GeneralReaderOp::inference() {
     if (elem_type[i] == P_INT64) {
       int64_t *dst_ptr = static_cast<int64_t *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
-              << "] is " << req->insts(0).tensor_array(i).int64_data(0);
+              << "] is " << tensor.int64_data(0);
       if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
-      memcpy(dst_ptr, req->insts(0).tensor_array(i).int64_data().data(), databuf_size[i]);
+      memcpy(dst_ptr, tensor.int64_data().data(), databuf_size[i]);
       /*
-      int elem_num = req->insts(0).tensor_array(i).int64_data_size();
+      int elem_num = tensor.int64_data_size();
       for (int k = 0; k < elem_num; ++k) {
-        dst_ptr[k] = req->insts(0).tensor_array(i).int64_data(k);
+        dst_ptr[k] = tensor.int64_data(k);
       }
       */
     } else if (elem_type[i] == P_FLOAT32) {
       float *dst_ptr = static_cast<float *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
-              << "] is " << req->insts(0).tensor_array(i).float_data(0);
+              << "] is " << tensor.float_data(0);
       if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
-      memcpy(dst_ptr, req->insts(0).tensor_array(i).float_data().data(), databuf_size[i]);
-      /*int elem_num = req->insts(0).tensor_array(i).float_data_size();
+      memcpy(dst_ptr, tensor.float_data().data(), databuf_size[i]);
+      /*int elem_num = tensor.float_data_size();
       for (int k = 0; k < elem_num; ++k) {
-        dst_ptr[k] = req->insts(0).tensor_array(i).float_data(k);
+        dst_ptr[k] = tensor.float_data(k);
       }*/
     } else if (elem_type[i] == P_INT32) {
       int32_t *dst_ptr = static_cast<int32_t *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
-              << "] is " << req->insts(0).tensor_array(i).int_data(0);
+              << "] is " << tensor.int_data(0);
       if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
-      memcpy(dst_ptr, req->insts(0).tensor_array(i).int_data().data(), databuf_size[i]);
+      memcpy(dst_ptr, tensor.int_data().data(), databuf_size[i]);
       /*
-      int elem_num = req->insts(0).tensor_array(i).int_data_size();
+      int elem_num = tensor.int_data_size();
       for (int k = 0; k < elem_num; ++k) {
-        dst_ptr[k] = req->insts(0).tensor_array(i).int_data(k);
+        dst_ptr[k] = tensor.int_data(k);
       }
       */
     } else if (elem_type[i] == P_STRING) {
       std::string *dst_ptr = static_cast<std::string *>(out->at(i).data.data());
       VLOG(2) << "(logid=" << log_id << ") first element data in var[" << i
-              << "] is " << req->insts(0).tensor_array(i).data(0);
+              << "] is " << tensor.data(0);
       if (!dst_ptr) {
         LOG(ERROR) << "dst_ptr is nullptr";
         return -1;
       }
-      int elem_num = req->insts(0).tensor_array(i).data_size();
+      int elem_num = tensor.data_size();
       for (int k = 0; k < elem_num; ++k) {
-        dst_ptr[k] = req->insts(0).tensor_array(i).data(k);
+        dst_ptr[k] = tensor.data(k);
       }
     }
   }

@@ -247,4 +248,4 @@ int GeneralReaderOp::inference() {
 DEFINE_OP(GeneralReaderOp);
 }  // namespace serving
 }  // namespace paddle_serving
 }  // namespace baidu
\ No newline at end of file
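The change in general_reader_op.cpp is mostly one refactor: the repeated req->insts(0).tensor_array(i) accessor is bound once to a const reference and reused. A small sketch of that pattern, using illustrative stand-in types rather than the real protobuf messages:

    #include <vector>

    struct Tensor {  // stand-in for the protobuf Tensor message
      int elem_type_;
      std::vector<int> shape_;
      int elem_type() const { return elem_type_; }
      int shape_size() const { return static_cast<int>(shape_.size()); }
      int shape(int j) const { return shape_[j]; }
    };

    struct Insts {  // stand-in for req->insts(0)
      std::vector<Tensor> tensors_;
      const Tensor& tensor_array(int i) const { return tensors_[i]; }
    };

    // Before the patch every field access re-evaluated the accessor chain;
    // after it, the element is hoisted once into `tensor` and reused, which
    // is shorter and avoids repeated accessor calls in the loop body.
    int check_shapes(const Insts& insts, int i, const std::vector<int>& expected) {
      const Tensor& tensor = insts.tensor_array(i);  // hoisted once
      if (static_cast<int>(expected.size()) != tensor.shape_size()) {
        return -1;
      }
      for (int j = 0; j < tensor.shape_size(); ++j) {
        if (expected[j] != tensor.shape(j)) {
          return -1;
        }
      }
      return 0;
    }

    int main() {
      Insts insts;
      insts.tensors_.push_back({0, {1, 3, 224, 224}});
      return check_shapes(insts, 0, {1, 3, 224, 224}) == 0 ? 0 : 1;
    }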
core/predictor/framework/infer.h

@@ -512,7 +512,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
     //Inside each for loop, use the in[i]->name as inputName and call 'core->GetInputHandle(inputName)' to get the pointer of InputData.
     //Set the lod and shape information of InputData first. then copy data from cpu to the core.
     const TensorVector *tensorVector_in_pointer =
         reinterpret_cast<const TensorVector *>(in);
     for (int i = 0; i < tensorVector_in_pointer->size(); ++i) {
       auto lod_tensor_in =
           core->GetInputHandle((*tensorVector_in_pointer)[i].name);
       lod_tensor_in->SetLoD((*tensorVector_in_pointer)[i].lod);
       lod_tensor_in->Reshape((*tensorVector_in_pointer)[i].shape);

@@ -552,7 +552,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
     }
     //Get the type and shape information of OutputData first. then copy data to cpu from the core.
     //The pointer type of data_out must be one of float *,int64_t*,int32_t* instead void*.
     for (int i = 0; i < outnames.size(); ++i) {
       auto lod_tensor_out = core->GetOutputHandle(outnames[i]);
       output_shape = lod_tensor_out->shape();
       out_num = std::accumulate(
           output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());

@@ -596,7 +596,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine<PaddleInferenceCore
       tensor_out.dtype = paddle::PaddleDType(dataType);
       tensor_out.shape.assign(output_shape.begin(), output_shape.end());
       std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
       for (int li = 0; li < out_lod.size(); ++li) {
         std::vector<size_t> lod_element;
         lod_element.assign(out_lod[li].begin(), out_lod[li].end());
         tensor_out.lod.push_back(lod_element);
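The output loop above sizes its host buffer as the product of the dimensions returned by lod_tensor_out->shape(). A tiny standalone sketch of that arithmetic on a plain vector, with an assumed example shape:

    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      // Illustrative shape; the engine gets the real one from the output handle.
      std::vector<int> output_shape = {2, 3, 4};
      // Same std::accumulate call as in infer.h: element count = product of dims.
      int out_num = std::accumulate(
          output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
      std::cout << "elements to copy: " << out_num << std::endl;  // prints 24
      return 0;
    }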
core/predictor/framework/resource.cpp

@@ -150,10 +150,8 @@ int Resource::initialize(const std::string& path, const std::string& file) {
   if (FLAGS_enable_model_toolkit) {
     size_t model_toolkit_num = resource_conf.model_toolkit_path_size();
     for (size_t mi = 0; mi < model_toolkit_num; ++mi) {
       std::string model_toolkit_path = resource_conf.model_toolkit_path(mi);
       std::string model_toolkit_file = resource_conf.model_toolkit_file(mi);
       if (InferManager::instance().proc_initialize(

@@ -227,7 +225,7 @@ int Resource::general_model_initialize(const std::string& path,
     return -1;
   }
   size_t general_model_num = resource_conf.general_model_path_size();
   for (size_t gi = 0; gi < general_model_num; ++gi) {
     std::string general_model_path = resource_conf.general_model_path(gi);

@@ -251,7 +249,7 @@ int Resource::general_model_initialize(const std::string& path,
     _config->_is_lod_feed.resize(feed_var_num);
     _config->_capacity.resize(feed_var_num);
     _config->_feed_shape.resize(feed_var_num);
     for (int i = 0; i < feed_var_num; ++i) {
       _config->_feed_name[i] = model_config.feed_var(i).name();
       _config->_feed_alias_name[i] = model_config.feed_var(i).alias_name();
       VLOG(2) << "feed var[" << i << "]: " << _config->_feed_name[i];

@@ -267,7 +265,7 @@ int Resource::general_model_initialize(const std::string& path,
       VLOG(2) << "var[" << i << "] is tensor";
       _config->_capacity[i] = 1;
       _config->_is_lod_feed[i] = false;
       for (int j = 0; j < model_config.feed_var(i).shape_size(); ++j) {
         int32_t dim = model_config.feed_var(i).shape(j);
         VLOG(2) << "var[" << i << "].shape[" << i << "]: " << dim;
         _config->_feed_shape[i].push_back(dim);

@@ -281,7 +279,7 @@ int Resource::general_model_initialize(const std::string& path,
     _config->_fetch_name.resize(fetch_var_num);
     _config->_fetch_alias_name.resize(fetch_var_num);
     _config->_fetch_shape.resize(fetch_var_num);
     for (int i = 0; i < fetch_var_num; ++i) {
       _config->_fetch_name[i] = model_config.fetch_var(i).name();
       _config->_fetch_alias_name[i] = model_config.fetch_var(i).alias_name();
       _config->_fetch_name_to_index[_config->_fetch_name[i]] = i;

@@ -292,7 +290,7 @@ int Resource::general_model_initialize(const std::string& path,
       _config->_is_lod_fetch[i] = true;
     } else {
       _config->_is_lod_fetch[i] = false;
       for (int j = 0; j < model_config.fetch_var(i).shape_size(); ++j) {
         int dim = model_config.fetch_var(i).shape(j);
         _config->_fetch_shape[i].push_back(dim);
       }
java/examples/src/main/java/PipelineClientExample.java
100644 → 100755

@@ -32,7 +32,7 @@ public class PipelineClientExample {
         System.out.println(fetch);
         if (StaticPipelineClient.succ != true) {
             if (!StaticPipelineClient.initClient("127.0.0.1", "18070")) {
                 System.out.println("connect failed.");
                 return false;
             }

@@ -57,7 +57,7 @@ public class PipelineClientExample {
         List<String> fetch = Arrays.asList("prediction");
         System.out.println(fetch);
         if (StaticPipelineClient.succ != true) {
             if (!StaticPipelineClient.initClient("127.0.0.1", "18070")) {
                 System.out.println("connect failed.");
                 return false;
             }

@@ -86,7 +86,7 @@ public class PipelineClientExample {
         }};
         List<String> fetch = Arrays.asList("prediction");
         if (StaticPipelineClient.succ != true) {
             if (!StaticPipelineClient.initClient("127.0.0.1", "9998")) {
                 System.out.println("connect failed.");
                 return false;
             }

@@ -105,7 +105,7 @@ public class PipelineClientExample {
      * @param npdata INDArray type(The input data).
      * @return String (specified String type for python Numpy eval method).
      */
-    String convertINDArrayToString(INDArray npdata){
+    String convertINDArrayToString(INDArray npdata) {
         return "array(" + npdata.toString() + ")";
     }
java/examples/src/main/java/StaticPipelineClient.java
100644 → 100755

@@ -30,10 +30,10 @@ public class StaticPipelineClient {
      * @param strPort String type(The server port) such as "8891".
      * @return boolean (the sign of connect status).
      */
-    public static boolean initClient(String strIp, String strPort){
+    public static boolean initClient(String strIp, String strPort) {
         String target = strIp + ":" + strPort;  //"172.17.0.2:18070";
         System.out.println("initial connect.");
         if (succ) {
             System.out.println("already connect.");
             return true;
         }
java/src/main/java/io/paddle/serving/client/PipelineClient.java
100644 → 100755

@@ -88,7 +88,7 @@ public class PipelineClient {
             keys.add(entry.getKey());
             values.add(entry.getValue());
         }
         if (profile) {
             keys.add(_profile_key);
             values.add(_profile_value);
         }
python/paddle_serving_client/client.py

@@ -31,13 +31,18 @@ sys.path.append(
     os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
 from .proto import multi_lang_general_model_service_pb2_grpc

+#param 'type'(which is in feed_var or fetch_var) = 0 means dataType is int64
+#param 'type'(which is in feed_var or fetch_var) = 1 means dataType is float32
+#param 'type'(which is in feed_var or fetch_var) = 2 means dataType is int32
+#param 'type'(which is in feed_var or fetch_var) = 3 means dataType is string(also called bytes in proto)
 int64_type = 0
 float32_type = 1
 int32_type = 2
 bytes_type = 3
+#int_type,float_type,string_type are the set of each subdivision classes.
 int_type = set([int64_type, int32_type])
 float_type = set([float32_type])
 string_type = set([bytes_type])

 class _NOPProfiler(object):

@@ -172,9 +177,9 @@ class Client(object):
         self.client_handle_.init_gflags([sys.argv[
             0]] + ["--tryfromenv=" + ",".join(read_env_flags)])
         self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
         self.feed_names_to_idx_ = {}  #this is not useful
         self.lod_tensor_set = set()
         self.feed_tensor_len = {}  #this is only used for shape check
         self.key = None
         for i, var in enumerate(model_conf.feed_var):

@@ -420,9 +425,9 @@ class Client(object):
             res = self.client_handle_.numpy_predict(
                 float_slot_batch, float_feed_names, float_shape,
                 float_lod_slot_batch, int_slot_batch, int_feed_names, int_shape,
-                int_lod_slot_batch, string_slot_batch, string_feed_names, string_shape,
-                string_lod_slot_batch, fetch_names, result_batch_handle, self.pid,
-                log_id)
+                int_lod_slot_batch, string_slot_batch, string_feed_names,
+                string_shape, string_lod_slot_batch, fetch_names,
+                result_batch_handle, self.pid, log_id)
         elif self.has_numpy_input == False:
             raise ValueError(
                 "Please make sure all of your inputs are numpy array")
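The comments added to client.py document the feed/fetch type codes (0 int64, 1 float32, 2 int32, 3 string/bytes), which correspond to the elem_type values the C++ reader op switches on as P_INT64, P_FLOAT32, P_INT32 and P_STRING. A minimal C++ sketch of that mapping; the enum names and numeric values here are an assumption inferred from the diff, only the 0/1/2/3 to int64/float32/int32/bytes correspondence is stated in the patch:

    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Illustrative enum mirroring the P_* constants used in general_reader_op.cpp;
    // values follow the comments added to client.py.
    enum FeedType : int {
      P_INT64 = 0,
      P_FLOAT32 = 1,
      P_INT32 = 2,
      P_STRING = 3,
    };

    // Per-element size, as the reader op computes before its memcpy; strings are
    // variable-length, so a fixed byte size is only a placeholder here.
    inline size_t elem_size_of(FeedType t) {
      switch (t) {
        case P_INT64:   return sizeof(int64_t);
        case P_FLOAT32: return sizeof(float);
        case P_INT32:   return sizeof(int32_t);
        case P_STRING:  return sizeof(std::string);
      }
      return 0;
    }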